import copy
import json
import re
from functools import partial, update_wrapper
from urllib.parse import quote as urlquote
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks,
InlineModelAdminChecks,
ModelAdminChecks,
)
from django.contrib.admin.decorators import display
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects,
construct_change_message,
flatten_fieldsets,
get_deleted_objects,
lookup_spawns_duplicates,
model_format_dict,
model_ngettext,
quote,
unquote,
)
from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist,
FieldError,
PermissionDenied,
ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet,
inlineformset_factory,
modelform_defines_fields,
modelform_factory,
modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import (
capfirst,
format_lazy,
get_text_list,
smart_split,
unescape_string_literal,
)
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = "_popup"
TO_FIELD_VAR = "_to_field"
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return "radiolist" if radio_style == VERTICAL else "radiolist inline"
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
"form_class": forms.SplitDateTimeField,
"widget": widgets.AdminSplitDateTime,
},
models.DateField: {"widget": widgets.AdminDateWidget},
models.TimeField: {"widget": widgets.AdminTimeWidget},
models.TextField: {"widget": widgets.AdminTextareaWidget},
models.URLField: {"widget": widgets.AdminURLFieldWidget},
models.IntegerField: {"widget": widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {"widget": widgets.AdminBigIntegerFieldWidget},
models.CharField: {"widget": widgets.AdminTextInputWidget},
models.ImageField: {"widget": widgets.AdminFileWidget},
models.FileField: {"widget": widgets.AdminFileWidget},
models.EmailField: {"widget": widgets.AdminEmailInputWidget},
models.UUIDField: {"widget": widgets.AdminUUIDInputWidget},
}
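# For reference, a ModelAdmin subclass extends these defaults by declaring
# its own formfield_overrides, which __init__() below merges on top of this
# dict; a minimal sketch (the widget name is hypothetical):
#
#     class ArticleAdmin(admin.ModelAdmin):
#         formfield_overrides = {
#             models.TextField: {"widget": RichTextWidget},
#         }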
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
autocomplete_fields = ()
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
sortable_by = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs}
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(
db_field.remote_field.model
)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(
request
),
can_delete_related=related_modeladmin.has_delete_permission(
request
),
can_view_related=related_modeladmin.has_view_permission(
request
),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget,
db_field.remote_field,
self.admin_site,
**wrapper_kwargs,
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs}
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if "widget" not in kwargs:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
if "choices" not in kwargs:
kwargs["choices"] = db_field.get_choices(
include_blank=db_field.blank, blank_choice=[("", _("None"))]
)
return db_field.formfield(**kwargs)
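    # Illustrative trigger for the radio_fields branch above: declaring
    #
    #     radio_fields = {"status": admin.VERTICAL}
    #
    # on a ModelAdmin (field name hypothetical) renders the "status"
    # choices with AdminRadioSelect and the "radiolist" CSS class.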
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(return None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(
*ordering
)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get("using")
if "widget" not in kwargs:
if db_field.name in self.get_autocomplete_fields(request):
kwargs["widget"] = AutocompleteSelect(
db_field, self.admin_site, using=db
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ForeignKeyRawIdWidget(
db_field.remote_field, self.admin_site, using=db
)
elif db_field.name in self.radio_fields:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
kwargs["empty_label"] = (
kwargs.get("empty_label", _("None")) if db_field.blank else None
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
return db_field.formfield(**kwargs)
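    # A common override pattern restricts the queryset before delegating to
    # this implementation; a sketch with hypothetical model and field names:
    #
    #     def formfield_for_foreignkey(self, db_field, request, **kwargs):
    #         if db_field.name == "author":
    #             kwargs["queryset"] = Author.objects.filter(is_active=True)
    #         return super().formfield_for_foreignkey(db_field, request, **kwargs)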
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get("using")
if "widget" not in kwargs:
autocomplete_fields = self.get_autocomplete_fields(request)
if db_field.name in autocomplete_fields:
kwargs["widget"] = AutocompleteSelectMultiple(
db_field,
self.admin_site,
using=db,
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ManyToManyRawIdWidget(
db_field.remote_field,
self.admin_site,
using=db,
)
elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:
kwargs["widget"] = widgets.FilteredSelectMultiple(
db_field.verbose_name, db_field.name in self.filter_vertical
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
form_field = db_field.formfield(**kwargs)
if (
isinstance(form_field.widget, SelectMultiple)
and form_field.widget.allow_multiple_selected
and not isinstance(
form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple)
)
):
msg = _(
"Hold down “Control”, or “Command” on a Mac, to select more than one."
)
help_text = form_field.help_text
form_field.help_text = (
format_lazy("{} {}", help_text, msg) if help_text else msg
)
return form_field
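    # Widget selection above follows declaration precedence: for a
    # hypothetical "tags" field, autocomplete_fields wins over raw_id_fields,
    # which wins over filter_vertical/filter_horizontal (the latter pair
    # mapping to FilteredSelectMultiple).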
def get_autocomplete_fields(self, request):
"""
Return a list of ForeignKey and/or ManyToMany fields which should use
an autocomplete widget.
"""
return self.autocomplete_fields
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif hasattr(obj, "get_absolute_url"):
# use the ContentType lookup if view_on_site is True
return reverse(
"admin:view_on_site",
kwargs={
"content_type_id": get_content_type_for_model(obj).pk,
"object_id": obj.pk,
},
current_app=self.admin_site.name,
)
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_exclude(self, request, obj=None):
"""
Hook for specifying exclude.
"""
return self.exclude
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
if self.fields:
return self.fields
# _get_form_for_get_fields() is implemented in subclasses.
form = self._get_form_for_get_fields(request, obj)
return [*form.base_fields, *self.get_readonly_fields(request, obj)]
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {"fields": self.get_fields(request, obj)})]
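    # An explicit declaration mirrors the default structure returned above;
    # a sketch with hypothetical field names:
    #
    #     fieldsets = [
    #         (None, {"fields": ["title", "body"]}),
    #         ("Advanced options", {"classes": ["collapse"], "fields": ["slug"]}),
    #     ]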
def get_inlines(self, request, obj):
"""Hook for specifying custom inlines."""
return self.inlines
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Return a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_sortable_by(self, request):
"""Hook for specifying which fields can be sorted in the changelist."""
return (
self.sortable_by
if self.sortable_by is not None
else self.get_list_display(request)
)
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for fk_lookup in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(fk_lookup):
fk_lookup = fk_lookup()
if (lookup, value) in widgets.url_params_from_lookup_dict(
fk_lookup
).items():
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on nonexistent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (
prev_field.is_relation
and field not in prev_field.path_infos[-1].target_fields
):
relation_parts.append(part)
if not getattr(field, "path_infos", None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.path_infos[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
valid_lookups = {self.date_hierarchy}
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(
filter_item, SimpleListFilter
):
valid_lookups.add(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.add(filter_item[0])
else:
valid_lookups.add(filter_item)
# Is it a valid relational lookup?
return not {
LOOKUP_SEP.join(relation_parts),
LOOKUP_SEP.join(relation_parts + [part]),
}.isdisjoint(valid_lookups)
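    # Illustrative outcome (hypothetical names): with
    # list_filter = ("employee__department",), a changelist parameter such as
    # employee__department__id=3 is allowed, while an unrelated relational
    # lookup like employee__salary__gt=0 is rejected.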
def to_field_allowed(self, request, to_field):
"""
Return True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
try:
field = self.opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in self.opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f
for f in self.opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (
any(issubclass(model, related_model) for model in registered_models)
and hasattr(remote_field, "get_related_field")
and remote_field.get_related_field() == field
):
return True
return False
def has_add_permission(self, request):
"""
Return True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename("add", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
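    # The permission string has the form "<app_label>.add_<model_name>",
    # e.g. "myapp.add_article" for a hypothetical Article model in "myapp".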
def has_change_permission(self, request, obj=None):
"""
        Return True if the given request has permission to change the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("change", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Return True if the given request has permission to delete the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("delete", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_view_permission(self, request, obj=None):
"""
Return True if the given request has permission to view the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
If overridden by the user in subclasses, it should return True if the
given request has permission to view the `obj` model instance. If `obj`
is None, it should return True if the request has permission to view
any object of the given type.
"""
opts = self.opts
codename_view = get_permission_codename("view", opts)
codename_change = get_permission_codename("change", opts)
return request.user.has_perm(
"%s.%s" % (opts.app_label, codename_view)
) or request.user.has_perm("%s.%s" % (opts.app_label, codename_change))
def has_view_or_change_permission(self, request, obj=None):
return self.has_view_permission(request, obj) or self.has_change_permission(
request, obj
)
def has_module_permission(self, request):
"""
Return True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
class ModelAdmin(BaseModelAdmin):
"""Encapsulate all admin options and functionality for a given model."""
list_display = ("__str__",)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
search_help_text = None
date_hierarchy = None
save_as = False
save_as_continue = True
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = ()
    # Custom templates (designed to be overridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
popup_response_template = None
# Actions
actions = ()
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super().__init__()
def __str__(self):
return "%s.%s" % (self.opts.app_label, self.__class__.__name__)
def __repr__(self):
return (
f"<{self.__class__.__qualname__}: model={self.model.__qualname__} "
f"site={self.admin_site!r}>"
)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.get_inlines(request, obj):
inline = inline_class(self.model, self.admin_site)
if request:
if not (
inline.has_view_or_change_permission(request, obj)
or inline.has_add_permission(request, obj)
or inline.has_delete_permission(request, obj)
):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.urls import path
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.opts.app_label, self.opts.model_name
return [
path("", wrap(self.changelist_view), name="%s_%s_changelist" % info),
path("add/", wrap(self.add_view), name="%s_%s_add" % info),
path(
"<path:object_id>/history/",
wrap(self.history_view),
name="%s_%s_history" % info,
),
path(
"<path:object_id>/delete/",
wrap(self.delete_view),
name="%s_%s_delete" % info,
),
path(
"<path:object_id>/change/",
wrap(self.change_view),
name="%s_%s_change" % info,
),
# For backwards compatibility (was the change url before 1.9)
path(
"<path:object_id>/",
wrap(
RedirectView.as_view(
pattern_name="%s:%s_%s_change"
% ((self.admin_site.name,) + info)
)
),
),
]
@property
def urls(self):
return self.get_urls()
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = [
"vendor/jquery/jquery%s.js" % extra,
"jquery.init.js",
"core.js",
"admin/RelatedObjectLookups.js",
"actions.js",
"urlify.js",
"prepopulate.js",
"vendor/xregexp/xregexp%s.js" % extra,
]
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_model_perms(self, request):
"""
Return a dict of all perms for this model. This dict has the keys
        ``add``, ``change``, ``delete``, and ``view`` mapping to True/False
        for each of those actions.
"""
return {
"add": self.has_add_permission(request),
"change": self.has_change_permission(request),
"delete": self.has_delete_permission(request),
"view": self.has_view_permission(request),
}
def _get_form_for_get_fields(self, request, obj):
return self.get_form(request, obj, fields=None)
def get_form(self, request, obj=None, change=False, **kwargs):
"""
        Return a Form class for use in the admin add and change views. This
        is used by add_view and change_view.
"""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
# Exclude all fields if it's a change form and the user doesn't have
# the change permission.
if (
change
and hasattr(request, "user")
and not self.has_change_permission(request, obj)
):
exclude.extend(fields)
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = dict.fromkeys(
f for f in readonly_fields if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError(
"%s. Check fields/fieldsets/exclude attributes of class %s."
% (e, self.__class__.__name__)
)
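    # A typical subclass override tweaks the generated form class; a sketch
    # assuming a hypothetical "slug" field:
    #
    #     def get_form(self, request, obj=None, **kwargs):
    #         form = super().get_form(request, obj, **kwargs)
    #         form.base_fields["slug"].disabled = True
    #         return form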
def get_changelist(self, request, **kwargs):
"""
Return the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_changelist_instance(self, request):
"""
Return a `ChangeList` instance based on `request`. May raise
`IncorrectLookupParameters`.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
# Add the action checkboxes if any actions are available.
if self.get_actions(request):
list_display = ["action_checkbox", *list_display]
sortable_by = self.get_sortable_by(request)
ChangeList = self.get_changelist(request)
return ChangeList(
request,
self.model,
list_display,
list_display_links,
self.get_list_filter(request),
self.date_hierarchy,
self.get_search_fields(request),
self.get_list_select_related(request),
self.list_per_page,
self.list_max_show_all,
self.list_editable,
self,
sortable_by,
self.search_help_text,
)
def get_object(self, request, object_id, from_field=None):
"""
        Return an instance matching the field and value provided; the primary
        key is used if no field is provided. Return ``None`` if no match is
        found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = (
model._meta.pk if from_field is None else model._meta.get_field(from_field)
)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Return a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults.get("fields") is None and not modelform_defines_fields(
defaults.get("form")
):
defaults["fields"] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Return a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
return modelformset_factory(
self.model,
self.get_changelist_form(request),
extra=0,
fields=self.list_editable,
**defaults,
)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yield formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(
self, request, queryset, per_page, orphans=0, allow_empty_first_page=True
):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, obj, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import ADDITION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, obj, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import CHANGE, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, obj, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import DELETION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=object_repr,
action_flag=DELETION,
)
@display(description=mark_safe('<input type="checkbox" id="action-toggle">'))
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))
@staticmethod
def _get_action_description(func, name):
return getattr(func, "short_description", capfirst(name.replace("_", " ")))
def _get_base_actions(self):
"""Return the list of actions, prior to any request-based filtering."""
actions = []
base_actions = (self.get_action(action) for action in self.actions or [])
# get_action might have returned None, so filter any of those out.
base_actions = [action for action in base_actions if action]
base_action_names = {name for _, name, _ in base_actions}
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
if name in base_action_names:
continue
description = self._get_action_description(func, name)
actions.append((func, name, description))
# Add actions from this ModelAdmin.
actions.extend(base_actions)
return actions
def _filter_actions_by_permissions(self, request, actions):
"""Filter out any actions that the user doesn't have access to."""
filtered_actions = []
for action in actions:
callable = action[0]
if not hasattr(callable, "allowed_permissions"):
filtered_actions.append(action)
continue
permission_checks = (
getattr(self, "has_%s_permission" % permission)
for permission in callable.allowed_permissions
)
if any(has_permission(request) for has_permission in permission_checks):
filtered_actions.append(action)
return filtered_actions
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is set to None that means actions are disabled on
# this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return {}
actions = self._filter_actions_by_permissions(request, self._get_base_actions())
return {name: (func, name, desc) for func, name, desc in actions}
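    # Actions usually reach this dictionary via declarations like the
    # following sketch (the status field is hypothetical):
    #
    #     @admin.action(description="Mark selected articles as published")
    #     def make_published(self, request, queryset):
    #         queryset.update(status="published")
    #
    #     actions = ["make_published"]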
def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).values():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin. The return value is a
        tuple of (callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
description = self._get_action_description(func, action)
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if (
self.list_display_links
or self.list_display_links is None
or not list_display
):
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Return a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Return a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Return a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith("^"):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith("="):
return "%s__iexact" % field_name[1:]
elif field_name.startswith("@"):
return "%s__search" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == "pk":
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, "path_infos"):
# Update opts to follow the relation.
opts = field.path_infos[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
may_have_duplicates = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [
construct_search(str(search_field)) for search_field in search_fields
]
term_queries = []
for bit in smart_split(search_term):
if bit.startswith(('"', "'")) and bit[0] == bit[-1]:
bit = unescape_string_literal(bit)
or_queries = models.Q.create(
[(orm_lookup, bit) for orm_lookup in orm_lookups],
connector=models.Q.OR,
)
term_queries.append(or_queries)
queryset = queryset.filter(models.Q.create(term_queries))
may_have_duplicates |= any(
lookup_spawns_duplicates(self.opts, search_spec)
for search_spec in orm_lookups
)
return queryset, may_have_duplicates
def get_preserved_filters(self, request):
"""
Return the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
current_url = "%s:%s" % (match.app_name, match.url_name)
changelist_url = "admin:%s_%s_changelist" % (
self.opts.app_label,
self.opts.model_name,
)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get("_changelist_filters")
if preserved_filters:
return urlencode({"_changelist_filters": preserved_filters})
return ""
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a JSON structure describing changes from a changed object.
"""
return construct_change_message(form, formsets, add)
def message_user(
self, request, message, level=messages.INFO, extra_tags="", fail_silently=False
):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ", ".join("`%s`" % level for level in levels)
raise ValueError(
"Bad message level string: `%s`. Possible values are: %s"
% (level, levels_repr)
)
messages.add_message(
request, level, message, extra_tags=extra_tags, fail_silently=fail_silently
)
def save_form(self, request, form, change):
"""
        Given a ModelForm, return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
        Given a model instance, save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
        Given a model instance, delete it from the database.
"""
obj.delete()
def delete_queryset(self, request, queryset):
"""Given a queryset, delete it from the database."""
queryset.delete()
def save_formset(self, request, form, formset, change):
"""
        Given an inline formset, save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(
self, request, context, add=False, change=False, form_url="", obj=None
):
app_label = self.opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, form_url
)
view_on_site_url = self.get_view_on_site_url(obj)
has_editable_inline_admin_formsets = False
for inline in context["inline_admin_formsets"]:
if (
inline.has_add_permission
or inline.has_change_permission
or inline.has_delete_permission
):
has_editable_inline_admin_formsets = True
break
context.update(
{
"add": add,
"change": change,
"has_view_permission": self.has_view_permission(request, obj),
"has_add_permission": self.has_add_permission(request),
"has_change_permission": self.has_change_permission(request, obj),
"has_delete_permission": self.has_delete_permission(request, obj),
"has_editable_inline_admin_formsets": (
has_editable_inline_admin_formsets
),
"has_file_field": context["adminform"].form.is_multipart()
or any(
admin_formset.formset.is_multipart()
for admin_formset in context["inline_admin_formsets"]
),
"has_absolute_url": view_on_site_url is not None,
"absolute_url": view_on_site_url,
"form_url": form_url,
"opts": self.opts,
"content_type_id": get_content_type_for_model(self.model).pk,
"save_as": self.save_as,
"save_on_top": self.save_on_top,
"to_field_var": TO_FIELD_VAR,
"is_popup_var": IS_POPUP_VAR,
"app_label": app_label,
}
)
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(
request,
form_template
or [
"admin/%s/%s/change_form.html" % (app_label, self.opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html",
],
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage.
"""
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
"name": opts.verbose_name,
"obj": obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"value": str(value),
"obj": str(obj),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST
and self.save_as_continue
and self.has_change_permission(request, obj)
):
msg = _("The {name} “{obj}” was added successfully.")
if self.has_change_permission(request, obj):
msg += " " + _("You may edit it again below.")
self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts},
post_url_continue,
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was added successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determine the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else opts.pk.attname
value = request.resolver_match.kwargs["object_id"]
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"action": "change",
"value": str(value),
"obj": str(obj),
"new_value": str(new_value),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
opts = self.opts
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
"name": opts.verbose_name,
"obj": format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may edit it "
"again below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may edit it again "
"below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(obj.pk,),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_add" % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was changed successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def _response_post_save(self, request, obj):
if self.has_view_or_change_permission(request):
post_url = reverse(
"admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
return self._response_post_save(request, obj)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
return self._response_post_save(request, obj)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get("index", 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({"action": data.getlist("action")[action_index]})
except IndexError:
            # If we didn't get an action from the chosen form, that's invalid
            # POST data, so by deleting "action" it will fail the validation
            # check below. No need to do anything here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data["action"]
select_across = action_form.cleaned_data["select_across"]
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determine the HttpResponse for the delete_view stage.
"""
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps(
{
"action": "delete",
"value": str(obj_id),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (self.opts.app_label, self.opts.model_name),
"admin/%s/popup_response.html" % self.opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
self.message_user(
request,
_("The %(name)s “%(obj)s” was deleted successfully.")
% {
"name": self.opts.verbose_name,
"obj": obj_display,
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
"admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
app_label = self.opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
media=self.media,
)
return TemplateResponse(
request,
self.delete_confirmation_template
or [
"admin/{}/{}/delete_confirmation.html".format(
app_label, self.opts.model_name
),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html",
],
context,
)
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
# Edit permissions on parent model are required for editable inlines.
can_edit_parent = (
self.has_change_permission(request, obj)
if obj
else self.has_add_permission(request)
)
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
if can_edit_parent:
has_add_permission = inline.has_add_permission(request, obj)
has_change_permission = inline.has_change_permission(request, obj)
has_delete_permission = inline.has_delete_permission(request, obj)
else:
# Disable all edit-permissions, and override formset settings.
has_add_permission = (
has_change_permission
) = has_delete_permission = False
formset.extra = formset.max_num = 0
has_view_permission = inline.has_view_permission(request, obj)
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(
inline,
formset,
fieldsets,
prepopulated,
readonly,
model_admin=self,
has_add_permission=has_add_permission,
has_change_permission=has_change_permission,
has_delete_permission=has_delete_permission,
has_view_permission=has_view_permission,
)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data from the request's GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.opts.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
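    # E.g. a GET of ?title=Draft&tags=1,2 yields
    # {"title": "Draft", "tags": ["1", "2"]} for a hypothetical model whose
    # "tags" field is a ManyToManyField.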
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
"""
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
"""
msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
if request.method == "POST" and "_saveasnew" in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if request.method == "POST":
if not self.has_change_permission(request, obj):
raise PermissionDenied
else:
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(
request, self.opts, object_id
)
fieldsets = self.get_fieldsets(request, obj)
ModelForm = self.get_form(
request, obj, change=not add, fields=flatten_fieldsets(fieldsets)
)
if request.method == "POST":
form = ModelForm(request.POST, request.FILES, instance=obj)
formsets, inline_instances = self._create_formsets(
request,
form.instance,
change=not add,
)
form_validated = form.is_valid()
if form_validated:
new_object = self.save_form(request, form, change=not add)
else:
new_object = form.instance
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(
request, form, formsets, add
)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(
request, form.instance, change=False
)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(
request, obj, change=True
)
if not add and not self.has_change_permission(request, obj):
readonly_fields = flatten_fieldsets(fieldsets)
else:
readonly_fields = self.get_readonly_fields(request, obj)
adminForm = helpers.AdminForm(
form,
list(fieldsets),
# Clear prepopulated fields on a view-only form to avoid a crash.
self.get_prepopulated_fields(request, obj)
if add or self.has_change_permission(request, obj)
else {},
readonly_fields,
model_admin=self,
)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(
request, formsets, inline_instances, obj
)
for inline_formset in inline_formsets:
media = media + inline_formset.media
if add:
title = _("Add %s")
elif self.has_change_permission(request, obj):
title = _("Change %s")
else:
title = _("View %s")
context = {
**self.admin_site.each_context(request),
"title": title % self.opts.verbose_name,
"subtitle": str(obj) if obj else None,
"adminform": adminForm,
"object_id": object_id,
"original": obj,
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
"media": media,
"inline_admin_formsets": inline_formsets,
"errors": helpers.AdminErrorList(form, formsets),
"preserved_filters": self.get_preserved_filters(request),
}
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if (
request.method == "POST"
and not form_validated
and "_saveasnew" in request.POST
):
context["show_save"] = False
context["show_save_and_continue"] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(
request, context, add=add, change=not add, obj=obj, form_url=form_url
)
def add_view(self, request, form_url="", extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url="", extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
def _get_edited_object_pks(self, request, prefix):
"""Return POST data values of list_editable primary keys."""
pk_pattern = re.compile(
r"{}-\d+-{}$".format(re.escape(prefix), self.opts.pk.name)
)
return [value for key, value in request.POST.items() if pk_pattern.match(key)]
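    # With prefix "form" and a pk named "id", the pattern above matches POST
    # keys such as "form-0-id" and "form-1-id" (the standard formset key
    # layout).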
def _get_list_editable_queryset(self, request, prefix):
"""
Based on POST data, return a queryset of the objects that were edited
via list_editable.
"""
object_pks = self._get_edited_object_pks(request, prefix)
queryset = self.get_queryset(request)
validate = queryset.model._meta.pk.to_python
try:
for pk in object_pks:
validate(pk)
except ValidationError:
# Disable the optimization if the POST data was tampered with.
return queryset
return queryset.filter(pk__in=object_pks)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
app_label = self.opts.app_label
if not self.has_view_or_change_permission(request):
raise PermissionDenied
try:
cl = self.get_changelist_instance(request)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET:
return SimpleTemplateResponse(
"admin/invalid_setup.html",
{
"title": _("Database error"),
},
)
return HttpResponseRedirect(request.path + "?" + ERROR_FLAG + "=1")
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
actions = self.get_actions(request)
# Actions with no confirmation
if (
actions
and request.method == "POST"
and "index" in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
else:
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (
actions
and request.method == "POST"
and helpers.ACTION_CHECKBOX_NAME in request.POST
and "index" not in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
if action_failed:
# Redirect back to the changelist page to avoid resubmitting the
# form if the user refreshes the browser or uses the "No, take
# me back" button on the action confirmation page.
return HttpResponseRedirect(request.get_full_path())
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and cl.list_editable and "_save" in request.POST:
if not self.has_change_permission(request):
raise PermissionDenied
FormSet = self.get_changelist_formset(request)
modified_objects = self._get_list_editable_queryset(
request, FormSet.get_default_prefix()
)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=modified_objects
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount,
) % {
"count": changecount,
"name": model_ngettext(self.opts, changecount),
}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable and self.has_change_permission(request):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
media += action_form.media
else:
action_form = None
selection_note_all = ngettext(
"%(total_count)s selected", "All %(total_count)s selected", cl.result_count
)
context = {
**self.admin_site.each_context(request),
"module_name": str(self.opts.verbose_name_plural),
"selection_note": _("0 of %(cnt)s selected") % {"cnt": len(cl.result_list)},
"selection_note_all": selection_note_all % {"total_count": cl.result_count},
"title": cl.title,
"subtitle": None,
"is_popup": cl.is_popup,
"to_field": cl.to_field,
"cl": cl,
"media": media,
"has_add_permission": self.has_add_permission(request),
"opts": cl.opts,
"action_form": action_form,
"actions_on_top": self.actions_on_top,
"actions_on_bottom": self.actions_on_bottom,
"actions_selection_counter": self.actions_selection_counter,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_list_template
or [
"admin/%s/%s/change_list.html" % (app_label, self.opts.model_name),
"admin/%s/change_list.html" % app_label,
"admin/change_list.html",
],
context,
)
def get_deleted_objects(self, objs, request):
"""
Hook for customizing the delete process for the delete view and the
"delete selected" action.
"""
return get_deleted_objects(objs, request, self.admin_site)
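# Illustrative sketch (not part of Django): a ModelAdmin subclass can
# override this hook to adjust the deletion summary. The admin class below
# is hypothetical; it reuses the parent's result but stops missing
# related-model permissions from blocking the deletion.
#
#     class PostAdmin(ModelAdmin):
#         def get_deleted_objects(self, objs, request):
#             deleted_objects, model_count, perms_needed, protected = (
#                 super().get_deleted_objects(objs, request)
#             )
#             # E.g. don't block deletion on missing related-model perms.
#             return deleted_objects, model_count, set(), protected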
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
"The 'delete' admin view for this model."
app_label = self.opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, self.opts, object_id)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(
deleted_objects,
model_count,
perms_needed,
protected,
) = self.get_deleted_objects([obj], request)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = str(obj)
attr = str(to_field) if to_field else self.opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = str(self.opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
**self.admin_site.each_context(request),
"title": title,
"subtitle": None,
"object_name": object_name,
"object": obj,
"deleted_objects": deleted_objects,
"model_count": dict(model_count).items(),
"perms_lacking": perms_needed,
"protected": protected,
"opts": self.opts,
"app_label": app_label,
"preserved_filters": self.get_preserved_filters(request),
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
**(extra_context or {}),
}
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
from django.contrib.admin.views.main import PAGE_VAR
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
return self._get_obj_does_not_exist_redirect(
request, model._meta, object_id
)
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
app_label = self.opts.app_label
action_list = (
LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model),
)
.select_related()
.order_by("action_time")
)
paginator = self.get_paginator(request, action_list, 100)
page_number = request.GET.get(PAGE_VAR, 1)
page_obj = paginator.get_page(page_number)
page_range = paginator.get_elided_page_range(page_obj.number)
context = {
**self.admin_site.each_context(request),
"title": _("Change history: %s") % obj,
"subtitle": None,
"action_list": page_obj,
"page_range": page_range,
"page_var": PAGE_VAR,
"pagination_required": paginator.count > 100,
"module_name": str(capfirst(self.opts.verbose_name_plural)),
"object": obj,
"opts": self.opts,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.object_history_template
or [
"admin/%s/%s/object_history.html" % (app_label, self.opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html",
],
context,
)
def get_formset_kwargs(self, request, obj, inline, prefix):
formset_params = {
"instance": obj,
"prefix": prefix,
"queryset": inline.get_queryset(request),
}
if request.method == "POST":
formset_params.update(
{
"data": request.POST.copy(),
"files": request.FILES,
"save_as_new": "_saveasnew" in request.POST,
}
)
return formset_params
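# Illustrative sketch (not part of Django): subclasses can extend this hook
# to pass extra parameters through to the inline formsets. ``form_kwargs``
# is a standard BaseFormSet constructor argument; the admin class and the
# kwarg value below are hypothetical.
#
#     class PostAdmin(ModelAdmin):
#         def get_formset_kwargs(self, request, obj, inline, prefix):
#             return {
#                 **super().get_formset_kwargs(request, obj, inline, prefix),
#                 "form_kwargs": {"initial": {"author": request.user.pk}},
#             }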
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = self.get_formset_kwargs(request, obj, inline, prefix)
formset = FormSet(**formset_params)
def user_deleted_form(request, obj, formset, index):
"""Return whether or not the user deleted the form."""
return (
inline.has_delete_permission(request, obj)
and "{}-{}-DELETE".format(formset.prefix, index) in request.POST
)
# Bypass validation of each view-only inline form (since the form's
# data won't be in request.POST), unless the form was deleted.
if not inline.has_change_permission(request, obj if change else None):
for index, form in enumerate(formset.initial_forms):
if user_deleted_form(request, obj, formset, index):
continue
form._errors = {}
form.cleaned_data = form.initial
formsets.append(formset)
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name_plural is None:
if self.verbose_name is None:
self.verbose_name_plural = self.opts.verbose_name_plural
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
if self.verbose_name is None:
self.verbose_name = self.opts.verbose_name
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"]
if self.filter_vertical or self.filter_horizontal:
js.extend(["SelectBox.js", "SelectFilter2.js"])
if self.classes and "collapse" in self.classes:
js.append("collapse.js")
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
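# Illustrative sketch (not part of Django): these hooks let an inline vary
# its form count per request or object, e.g. showing blank extra forms only
# when adding a new parent. The inline class and related model below are
# hypothetical.
#
#     class ChapterInline(TabularInline):
#         model = Chapter  # hypothetical related model
#
#         def get_extra(self, request, obj=None, **kwargs):
#             # No blank extra forms when editing an existing object.
#             return 0 if obj else self.extra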
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in admin add/change views."""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
**kwargs,
}
base_model_form = defaults["form"]
can_change = self.has_change_permission(request, obj) if request else True
can_add = self.has_add_permission(request, obj) if request else True
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because in
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance._state.adding:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance
# representation, suitable to be an item in a
# list.
_("%(class_name)s %(instance)s")
% {"class_name": p._meta.verbose_name, "instance": p}
)
params = {
"class_name": self._meta.model._meta.verbose_name,
"instance": self.instance,
"related_objects": get_text_list(objs, _("and")),
}
msg = _(
"Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s"
)
raise ValidationError(
msg, code="deleting_protected", params=params
)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
def has_changed(self):
# Protect against unauthorized edits.
if not can_change and not self.instance._state.adding:
return False
if not can_add and self.instance._state.adding:
return False
return super().has_changed()
defaults["form"] = DeleteProtectedModelForm
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def _get_form_for_get_fields(self, request, obj=None):
return self.get_formset(request, obj, fields=None).form
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_view_or_change_permission(request):
queryset = queryset.none()
return queryset
def _has_any_perms_for_target_model(self, request, perms):
"""
This method is called only when the ModelAdmin's model is for a
ManyToManyField's implicit through model (if self.opts.auto_created).
Return True if the user has any of the given permissions ('add',
'change', etc.) for the model that points to the through model.
"""
opts = self.opts
# Find the target model of an auto-created many-to-many relationship.
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
return any(
request.user.has_perm(
"%s.%s" % (opts.app_label, get_permission_codename(perm, opts))
)
for perm in perms
)
def has_add_permission(self, request, obj):
if self.opts.auto_created:
# Auto-created intermediate models don't have their own
# permissions. The user needs to have the change permission for the
# related model in order to be able to do anything with the
# intermediate model.
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_change_permission(request)
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_delete_permission(request, obj)
def has_view_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission(). The 'change' permission
# also implies the 'view' permission.
return self._has_any_perms_for_target_model(request, ["view", "change"])
return super().has_view_permission(request)
class StackedInline(InlineModelAdmin):
template = "admin/edit_inline/stacked.html"
class TabularInline(InlineModelAdmin):
template = "admin/edit_inline/tabular.html"
|
9201adaef9a3dfeab667b6500b76eaaa7e1095312d467473c9ca278b7aa4c8a2 | import functools
import itertools
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.fields.related import (
ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.db.models.sql import AND
from django.db.models.sql.where import WhereNode
from django.utils.functional import cached_property
class GenericForeignKey(FieldCacheMixin):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(
self, ct_field="content_type", fk_field="object_id", for_concrete_model=True
):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
cls._meta.add_field(self, private=True)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
return "%s.%s" % (model._meta.label, self.name)
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_object_id_field(),
*self._check_content_type_field(),
]
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the "
"nonexistent field '%s'." % self.fk_field,
obj=self,
id="contenttypes.E001",
)
]
else:
return []
def _check_content_type_field(self):
"""
Check that the field named by ``self.ct_field`` exists on the model
and is a valid content type field (i.e. a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the "
"nonexistent field '%s.%s'."
% (self.model._meta.object_name, self.ct_field),
obj=self,
id="contenttypes.E002",
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E003",
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def get_cache_name(self):
return self.name
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model
)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(
id=ct_id, using=obj._state.db
).model_class()
return (
model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model,
)
return (
ret_val,
lambda obj: (obj.pk, obj.__class__),
gfk_key,
True,
self.name,
False,
)
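# Illustrative note (not part of Django): the grouping above is what makes
# prefetching a GenericForeignKey issue one query per content type rather
# than one per row, e.g. (model name hypothetical):
#
#     TaggedItem.objects.prefetch_related("content_object")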
def __get__(self, instance, cls=None):
if instance is None:
return self
# Don't use getattr(instance, self.ct_field) here because that might
# reload the same ContentType over and over (#5570). Instead, get the
# content type ID here, and later when the actual instance is needed,
# use ContentType.objects.get_for_id(), which has a global cache.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
pk_val = getattr(instance, self.fk_field)
rel_obj = self.get_cached_value(instance, default=None)
if rel_obj is None and self.is_cached(instance):
return rel_obj
if rel_obj is not None:
ct_match = (
ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id
)
pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk
if ct_match and pk_match:
return rel_obj
else:
rel_obj = None
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=pk_val)
except ObjectDoesNotExist:
pass
self.set_cached_value(instance, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value.pk
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
self.set_cached_value(instance, value)
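# Illustrative usage sketch (not part of Django itself): the canonical
# GenericForeignKey setup pairs a ContentType foreign key with an object id
# field; the accessor above then resolves ``content_object`` to whatever
# model instance the pair points at (model name hypothetical).
#
#     from django.contrib.contenttypes.fields import GenericForeignKey
#     from django.contrib.contenttypes.models import ContentType
#     from django.db import models
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey("content_type", "object_id")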
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
super().__init__(
field,
to,
related_name=related_query_name or "+",
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
empty_strings_allowed = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
mti_inherited = False
def __init__(
self,
to,
object_id_field="object_id",
content_type_field="content_type",
for_concrete_model=True,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
kwargs["rel"] = self.rel_class(
self,
to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
kwargs["null"] = True
kwargs["blank"] = True
kwargs["on_delete"] = models.CASCADE
kwargs["editable"] = False
kwargs["serialize"] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to the object_id field. But this
# relation isn't direct; the join is generated in reverse along the
# foreign key. So the from_field is the object_id field and the
# to_field is pk because of the reverse join.
super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_generic_foreign_key_existence(),
]
def _is_matching_generic_foreign_key(self, field):
"""
Return True if field is a GenericForeignKey whose content type and
object id fields correspond to the equivalent attributes on this
GenericRelation.
"""
return (
isinstance(field, GenericForeignKey)
and field.ct_field == self.content_type_field_name
and field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s', but that model does not have a GenericForeignKey."
% target._meta.label,
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [
(
self.remote_field.model._meta.get_field(self.object_id_field_name),
self.model._meta.pk,
)
]
def _get_path_info_with_parent(self, filtered_relation):
"""
Return the path that joins the current model through any parent models.
The idea is that if you have a GFK defined on a parent model then we
need to join the parent model first, then the child model.
"""
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta.concrete_model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(
PathInfo(
from_opts=self.model._meta,
to_opts=parent_opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
)
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.path_infos)
return path
def get_path_info(self, filtered_relation=None):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent(filtered_relation)
else:
target = opts.pk
return [
PathInfo(
from_opts=self.model._meta,
to_opts=opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
]
def get_reverse_path_info(self, filtered_relation=None):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return str([instance.pk for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs["private_only"] = True
super().contribute_to_class(cls, name, **kwargs)
self.model = cls
# Disable the reverse relation for fields inherited by subclasses of a
# model in multi-table inheritance. The reverse relation points to the
# field of the base model.
if self.mti_inherited:
self.remote_field.related_name = "+"
self.remote_field.related_query_name = None
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(
model._meta.order_with_respect_to
):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(
make_generic_foreign_order_accessors,
self.model,
self.remote_field.model,
)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(
self.model, for_concrete_model=self.for_concrete_model
)
def get_extra_restriction(self, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk)
return WhereNode([lookup], connector=AND)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(
**{
"%s__pk"
% self.content_type_field_name: ContentType.objects.db_manager(using)
.get_for_model(self.model, for_concrete_model=self.for_concrete_model)
.pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs],
}
)
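# Illustrative usage sketch (not part of Django itself): declaring a
# GenericRelation on the "other side" enables reverse access and spanning
# lookups (``TaggedItem`` as sketched above; ``Bookmark`` is hypothetical).
#
#     class Bookmark(models.Model):
#         url = models.URLField()
#         tags = GenericRelation(TaggedItem)
#
#     Bookmark.objects.filter(tags__tag="django")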
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
@cached_property
def related_manager_cache_key(self):
# By default, GenericRel instances will be marked as hidden unless
# related_query_name is given (their accessor name being "+" when
# hidden), which would cause multiple GenericRelations declared on a
# single model to collide, so always use the remote field's name.
return self.field.get_cache_name()
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
self.model = rel.model
self.get_content_type = functools.partial(
ContentType.objects.db_manager(instance._state.db).get_for_model,
for_concrete_model=rel.field.for_concrete_model,
)
self.content_type = self.get_content_type(instance)
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance.pk
self.core_filters = {
"%s__pk" % self.content_type_field_name: self.content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
# Group instances by content types.
content_type_queries = [
models.Q.create(
[
(f"{self.content_type_field_name}__pk", content_type_id),
(f"{self.object_id_field_name}__in", {obj.pk for obj in objs}),
]
)
for content_type_id, objs in itertools.groupby(
sorted(instances, key=lambda obj: self.get_content_type(obj).pk),
lambda obj: self.get_content_type(obj).pk,
)
]
query = models.Q.create(content_type_queries, connector=models.Q.OR)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
content_type_id_field_name = "%s_id" % self.content_type_field_name
return (
queryset.filter(query),
lambda relobj: (
object_id_converter(getattr(relobj, self.object_id_field_name)),
getattr(relobj, content_type_id_field_name),
),
lambda obj: (obj.pk, self.get_content_type(obj).pk),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, bulk=True):
if not objs:
return
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
self._remove_prefetched_objects()
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
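# Illustrative usage sketch (not part of Django itself): the generated
# manager supports the familiar related-manager API, filling in the content
# type and object id automatically (instances hypothetical).
#
#     bookmark = Bookmark.objects.create(url="https://example.com/")
#     bookmark.tags.create(tag="django")
#     bookmark.tags.add(existing_tagged_item)
#     bookmark.tags.set([item_a, item_b])
#     bookmark.tags.clear()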
|
ee0842a8af13d1606a3f8144b7dc1daec32859408241fc3d913af1d9498c93a6 | import datetime
import importlib
import io
import os
import shutil
import sys
from unittest import mock
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import (
ConnectionHandler,
DatabaseError,
OperationalError,
connection,
connections,
models,
)
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.utils import truncate_name
from django.db.migrations.exceptions import InconsistentMigrationHistory
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import captured_stdout
from django.utils import timezone
from django.utils.version import get_docs_version
from .models import UnicodeModel, UnserializableModel
from .routers import TestRouter
from .test_base import MigrationTestBase
HAS_BLACK = shutil.which("black")
class MigrateTests(MigrationTestBase):
"""
Tests running the migrate command.
"""
databases = {"default", "other"}
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# No tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
stdout = io.StringIO()
call_command(
"migrate", "migrations", "0001", verbosity=2, stdout=stdout, no_color=True
)
stdout = stdout.getvalue()
self.assertIn(
"Target specific migration: 0001_initial, from migrations", stdout
)
self.assertIn("Applying migrations.0001_initial... OK", stdout)
self.assertIn("Running pre-migrate handlers for application migrations", stdout)
self.assertIn(
"Running post-migrate handlers for application migrations", stdout
)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
stdout = io.StringIO()
call_command(
"migrate", "migrations", "zero", verbosity=2, stdout=stdout, no_color=True
)
stdout = stdout.getvalue()
self.assertIn("Unapply all migrations: migrations", stdout)
self.assertIn("Unapplying migrations.0002_second... OK", stdout)
self.assertIn("Running pre-migrate handlers for application migrations", stdout)
self.assertIn(
"Running post-migrate handlers for application migrations", stdout
)
# Tables are gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"migrations.migrations_test_apps.migrated_app",
]
)
def test_migrate_with_system_checks(self):
out = io.StringIO()
call_command("migrate", skip_checks=False, no_color=True, stdout=out)
self.assertIn("Apply all migrations: migrated_app", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unmigrated_app_syncdb",
]
)
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_syncdb' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", app_label="unmigrated_app_syncdb")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_clashing_prefix"}
)
def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", app_label="migrations", migration_name="a")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command(
"migrate", app_label="migrations", migration_name="nonexistent"
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"}
)
def test_migrate_initial_false(self):
"""
`Migration.initial = False` skips fake-initial detection.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Fake rollback
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
# Make sure fake-initial detection does not run
with self.assertRaises(DatabaseError):
call_command(
"migrate", "migrations", "0001", fake_initial=True, verbosity=0
)
call_command("migrate", "migrations", "0001", fake=True, verbosity=0)
# Real rollback
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations"},
DATABASE_ROUTERS=["migrations.routers.TestRouter"],
)
def test_migrate_fake_initial(self):
"""
--fake-initial only works if all tables created in the initial
migration of an app exist. Database routers must be obeyed when doing
that check.
"""
# Make sure no tables are created
for db in self.databases:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "0001", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Also check the "other" database
self.assertTableNotExists("migrations_author", using="other")
self.assertTableExists("migrations_tribble", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command(
"migrate", "migrations", "zero", fake=True, verbosity=0, database="other"
)
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble", using="other")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", verbosity=0)
# Run initial migration with an explicit --fake-initial
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
stdout=out,
verbosity=1,
)
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
verbosity=0,
database="other",
)
self.assertIn("migrations.0001_initial... faked", out.getvalue().lower())
try:
# Run migrations all the way.
call_command("migrate", verbosity=0)
call_command("migrate", verbosity=0, database="other")
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
self.assertTableNotExists("migrations_author", using="other")
self.assertTableNotExists("migrations_tribble", using="other")
self.assertTableNotExists("migrations_book", using="other")
# Fake a roll-back.
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command(
"migrate",
"migrations",
"zero",
fake=True,
verbosity=0,
database="other",
)
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Run initial migration.
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", verbosity=0)
# Run initial migration with an explicit --fake-initial.
with self.assertRaises(DatabaseError):
# Fails because "migrations_tribble" does not exist but needs
# to in order to make --fake-initial work.
call_command("migrate", "migrations", fake_initial=True, verbosity=0)
# Fake an apply.
call_command("migrate", "migrations", fake=True, verbosity=0)
call_command(
"migrate", "migrations", fake=True, verbosity=0, database="other"
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0, database="other")
# Make sure it's all gone
for db in self.databases:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
self.assertTableNotExists("migrations_book", using=db)
@skipUnlessDBFeature("ignores_table_name_case")
def test_migrate_fake_initial_case_insensitive(self):
with override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_fake_initial_case_insensitive.initial",
}
):
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
with override_settings(
MIGRATION_MODULES={
"migrations": (
"migrations.test_fake_initial_case_insensitive.fake_initial"
),
}
):
out = io.StringIO()
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
stdout=out,
verbosity=1,
no_color=True,
)
self.assertIn(
"migrations.0001_initial... faked",
out.getvalue().lower(),
)
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_fake_split_initial"
}
)
def test_migrate_fake_split_initial(self):
"""
Split initial migrations can be faked with --fake-initial.
"""
try:
call_command("migrate", "migrations", "0002", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"migrate",
"migrations",
"0002",
fake_initial=True,
stdout=out,
verbosity=1,
)
value = out.getvalue().lower()
self.assertIn("migrations.0001_initial... faked", value)
self.assertIn("migrations.0002_second... faked", value)
finally:
# Fake an apply.
call_command("migrate", "migrations", fake=True, verbosity=0)
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"}
)
def test_migrate_conflict_exit(self):
"""
migrate exits if it detects a conflict.
"""
msg = (
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (0002_conflicting_second, 0002_second in "
"migrations).\n"
"To fix them run 'python manage.py makemigrations --merge'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", "migrations")
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
}
)
def test_migrate_check(self):
with self.assertRaises(SystemExit):
call_command("migrate", "migrations", "0001", check_unapplied=True)
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_plan",
}
)
def test_migrate_check_plan(self):
out = io.StringIO()
with self.assertRaises(SystemExit):
call_command(
"migrate",
"migrations",
"0001",
check_unapplied=True,
plan=True,
stdout=out,
no_color=True,
)
self.assertEqual(
"Planned operations:\n"
"migrations.0001_initial\n"
" Create model Salamander\n"
" Raw Python operation -> Grow salamander tail.\n",
out.getvalue(),
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_showmigrations_list(self):
"""
showmigrations --list displays migrations and whether or not they're
applied.
"""
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: True
):
call_command(
"showmigrations", format="list", stdout=out, verbosity=0, no_color=False
)
self.assertEqual(
"\x1b[1mmigrations\n\x1b[0m [ ] 0001_initial\n [ ] 0002_second\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = io.StringIO()
# Passing an explicit app_label exercises the command's selective
# `show_list` handling.
call_command(
"showmigrations",
"migrations",
format="list",
stdout=out,
verbosity=0,
no_color=True,
)
self.assertEqual(
"migrations\n [x] 0001_initial\n [ ] 0002_second\n", out.getvalue().lower()
)
out = io.StringIO()
# Applied datetimes are displayed at verbosity 2+.
call_command(
"showmigrations", "migrations", stdout=out, verbosity=2, no_color=True
)
migration1 = MigrationRecorder(connection).migration_qs.get(
app="migrations", name="0001_initial"
)
self.assertEqual(
"migrations\n"
" [x] 0001_initial (applied at %s)\n"
" [ ] 0002_second\n" % migration1.applied.strftime("%Y-%m-%d %H:%M:%S"),
out.getvalue().lower(),
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_showmigrations_list_squashed(self):
out = io.StringIO()
call_command(
"showmigrations", format="list", stdout=out, verbosity=2, no_color=True
)
self.assertEqual(
"migrations\n [ ] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command(
"migrate",
"migrations",
"0001_squashed_0002",
stdout=out,
verbosity=2,
no_color=True,
)
try:
self.assertIn(
"operations to perform:\n"
" target specific migration: 0001_squashed_0002, from migrations\n"
"running pre-migrate handlers for application migrations\n"
"running migrations:\n"
" applying migrations.0001_squashed_0002... ok (",
out.getvalue().lower(),
)
out = io.StringIO()
call_command(
"showmigrations", format="list", stdout=out, verbosity=2, no_color=True
)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}
)
def test_showmigrations_plan(self):
"""
Tests --plan output of showmigrations command
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, "
"migrations.0003_third)\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, "
"migrations.0003_third)\n",
out.getvalue().lower(),
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_plan"}
)
def test_migrate_plan(self):
"""Tests migrate --plan output."""
out = io.StringIO()
# Show the plan up to the third migration.
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0001_initial\n"
" Create model Salamander\n"
" Raw Python operation -> Grow salamander tail.\n"
"migrations.0002_second\n"
" Create model Book\n"
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
"migrations.0003_third\n"
" Create model Author\n"
" Raw SQL operation -> ['SELECT * FROM migrations_author']\n",
out.getvalue(),
)
try:
# Migrate to the third migration.
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
# Show the plan for when there is nothing to apply.
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n No planned migration operations.\n",
out.getvalue(),
)
out = io.StringIO()
# Show the plan for reverse migration back to 0001.
call_command(
"migrate", "migrations", "0001", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0003_third\n"
" Undo Create model Author\n"
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
"migrations.0002_second\n"
" Undo Create model Book\n"
" Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n",
out.getvalue(),
)
out = io.StringIO()
# Show the migration plan to fourth, with truncated details.
call_command(
"migrate", "migrations", "0004", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0004_fourth\n"
" Raw SQL operation -> SELECT * FROM migrations_author WHE…\n",
out.getvalue(),
)
# Show the plan when an operation is irreversible.
# Migrate to the fourth migration.
call_command("migrate", "migrations", "0004", verbosity=0)
out = io.StringIO()
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0004_fourth\n"
" Raw SQL operation -> IRREVERSIBLE\n",
out.getvalue(),
)
out = io.StringIO()
call_command(
"migrate", "migrations", "0005", plan=True, stdout=out, no_color=True
)
# Operation is marked as irreversible only in the revert plan.
self.assertEqual(
"Planned operations:\n"
"migrations.0005_fifth\n"
" Raw Python operation\n"
" Raw Python operation\n"
" Raw Python operation -> Feed salamander.\n",
out.getvalue(),
)
call_command("migrate", "migrations", "0005", verbosity=0)
out = io.StringIO()
call_command(
"migrate", "migrations", "0004", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0005_fifth\n"
" Raw Python operation -> IRREVERSIBLE\n"
" Raw Python operation -> IRREVERSIBLE\n"
" Raw Python operation\n",
out.getvalue(),
)
finally:
# Cleanup by unmigrating everything: fake the irreversible, then
# migrate all to zero.
call_command("migrate", "migrations", "0003", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}
)
def test_showmigrations_no_migrations(self):
out = io.StringIO()
call_command("showmigrations", stdout=out, no_color=True)
self.assertEqual("migrations\n (no migrations)\n", out.getvalue().lower())
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app"]
)
def test_showmigrations_unmigrated_app(self):
out = io.StringIO()
call_command("showmigrations", "unmigrated_app", stdout=out, no_color=True)
try:
self.assertEqual(
"unmigrated_app\n (no migrations)\n", out.getvalue().lower()
)
finally:
# unmigrated_app.SillyModel has a foreign key to
# 'migrations.Tribble', but that model is only defined in a
# migration, so the global app registry never sees it and the
# reference is left dangling. Remove it to avoid problems in
# subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}
)
def test_showmigrations_plan_no_migrations(self):
"""
        Tests --plan output of the showmigrations command when there are no
        migrations.
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, no_color=True)
self.assertEqual("(no migrations)\n", out.getvalue().lower())
out = io.StringIO()
call_command(
"showmigrations", format="plan", stdout=out, verbosity=2, no_color=True
)
self.assertEqual("(no migrations)\n", out.getvalue().lower())
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}
)
def test_showmigrations_plan_squashed(self):
"""
        Tests --plan output of the showmigrations command with squashed
        migrations.
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto\n"
"[ ] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto ... (migrations.1_auto)\n"
"[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto\n"
"[x] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto ... (migrations.1_auto)\n"
"[x] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower(),
)
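    # For reference, the two invocations above are the programmatic
    # equivalents of the shell commands (a sketch; assumes a configured
    # settings module):
    #
    #   python manage.py showmigrations --plan
    #   python manage.py showmigrations --plan -v 2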
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.mutate_state_b",
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_showmigrations_plan_single_app_label(self):
"""
`showmigrations --plan app_label` output with a single app_label.
"""
# Single app with no dependencies on other apps.
out = io.StringIO()
call_command("showmigrations", "mutate_state_b", format="plan", stdout=out)
self.assertEqual(
"[ ] mutate_state_b.0001_initial\n[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
# Single app with dependencies.
out = io.StringIO()
call_command("showmigrations", "author_app", format="plan", stdout=out)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n",
out.getvalue(),
)
# Some migrations already applied.
call_command("migrate", "author_app", "0001", verbosity=0)
out = io.StringIO()
call_command("showmigrations", "author_app", format="plan", stdout=out)
self.assertEqual(
"[X] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n",
out.getvalue(),
)
# Cleanup by unmigrating author_app.
call_command("migrate", "author_app", "zero", verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.mutate_state_b",
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_showmigrations_plan_multiple_app_labels(self):
"""
`showmigrations --plan app_label` output with multiple app_labels.
"""
# Multiple apps: author_app depends on book_app; mutate_state_b doesn't
# depend on other apps.
out = io.StringIO()
call_command(
"showmigrations", "mutate_state_b", "author_app", format="plan", stdout=out
)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n"
"[ ] mutate_state_b.0001_initial\n"
"[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
# Multiple apps: args order shouldn't matter (the same result is
# expected as above).
out = io.StringIO()
call_command(
"showmigrations", "author_app", "mutate_state_b", format="plan", stdout=out
)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n"
"[ ] mutate_state_b.0001_initial\n"
"[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app"]
)
def test_showmigrations_plan_app_label_no_migrations(self):
out = io.StringIO()
call_command(
"showmigrations", "unmigrated_app", format="plan", stdout=out, no_color=True
)
try:
self.assertEqual("(no migrations)\n", out.getvalue())
finally:
# unmigrated_app.SillyModel has a foreign key to
# 'migrations.Tribble', but that model is only defined in a
# migration, so the global app registry never sees it and the
# reference is left dangling. Remove it to avoid problems in
# subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_forwards(self):
"""
        sqlmigrate outputs forward-looking SQL.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
self.assertEqual(lines[0], connection.ops.start_transaction_sql())
self.assertEqual(lines[-1], connection.ops.end_transaction_sql())
lines = lines[1:-1]
self.assertEqual(
lines[:3],
[
"--",
"-- Create model Author",
"--",
],
)
self.assertIn(
"create table %s" % connection.ops.quote_name("migrations_author").lower(),
lines[3].lower(),
)
pos = lines.index("--", 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Tribble",
"--",
],
)
self.assertIn(
"create table %s" % connection.ops.quote_name("migrations_tribble").lower(),
lines[pos + 3].lower(),
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Add field bool to tribble",
"--",
],
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Alter unique_together for author (1 constraint(s))",
"--",
],
)
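    # Illustrative sketch, not part of the original suite: the
    # wrapper-stripping logic above can be expressed as a helper so that
    # assertions only see the migration's own statements. ``connection`` is
    # the module-level import these tests already use.
    @staticmethod
    def _strip_transaction_wrappers(lines):
        """Drop the BEGIN/COMMIT lines when the backend can roll back DDL."""
        if connection.features.can_rollback_ddl:
            return lines[1:-1]
        return lines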
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_backwards(self):
"""
        sqlmigrate outputs reverse-looking SQL.
"""
# Cannot generate the reverse SQL unless we've applied the migration.
call_command("migrate", "migrations", verbosity=0)
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
lines = out.getvalue().splitlines()
try:
if connection.features.can_rollback_ddl:
self.assertEqual(lines[0], connection.ops.start_transaction_sql())
self.assertEqual(lines[-1], connection.ops.end_transaction_sql())
lines = lines[1:-1]
self.assertEqual(
lines[:3],
[
"--",
"-- Alter unique_together for author (1 constraint(s))",
"--",
],
)
pos = lines.index("--", 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Add field bool to tribble",
"--",
],
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Tribble",
"--",
],
)
next_pos = lines.index("--", pos + 3)
drop_table_sql = (
"drop table %s"
% connection.ops.quote_name("migrations_tribble").lower()
)
for line in lines[pos + 3 : next_pos]:
if drop_table_sql in line.lower():
break
else:
self.fail("DROP TABLE (tribble) not found.")
pos = next_pos
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Author",
"--",
],
)
drop_table_sql = (
"drop table %s" % connection.ops.quote_name("migrations_author").lower()
)
for line in lines[pos + 3 :]:
if drop_table_sql in line.lower():
break
else:
self.fail("DROP TABLE (author) not found.")
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}
)
def test_sqlmigrate_for_non_atomic_migration(self):
"""
Transaction wrappers aren't shown for non-atomic migrations.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
if connection.ops.start_transaction_sql():
self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_for_non_transactional_databases(self):
"""
Transaction wrappers aren't shown for databases that don't support
transactional DDL.
"""
out = io.StringIO()
with mock.patch.object(connection.features, "can_rollback_ddl", False):
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
start_transaction_sql = connection.ops.start_transaction_sql()
if start_transaction_sql:
self.assertNotIn(start_transaction_sql.lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_ambiguous_prefix_squashed_migrations(self):
msg = (
"More than one migration matches '0001' in app 'migrations'. "
"Please be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("sqlmigrate", "migrations", "0001")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_squashed_migration(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_squashed_0002", stdout=out)
output = out.getvalue().lower()
self.assertIn("-- create model author", output)
self.assertIn("-- create model book", output)
self.assertNotIn("-- create model tribble", output)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_replaced_migration(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_initial", stdout=out)
output = out.getvalue().lower()
self.assertIn("-- create model author", output)
self.assertIn("-- create model tribble", output)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_operations"}
)
def test_sqlmigrate_no_operations(self):
err = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_initial", stderr=err)
self.assertEqual(err.getvalue(), "No operations found.\n")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_noop"}
)
def test_sqlmigrate_noop(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
lines = lines[1:-1]
self.assertEqual(
lines,
[
"--",
"-- Raw SQL operation",
"--",
"-- (no-op)",
],
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_manual_porting"}
)
def test_sqlmigrate_unrepresentable(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0002", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
lines = lines[1:-1]
self.assertEqual(
lines,
[
"--",
"-- Raw Python operation",
"--",
"-- THIS OPERATION CANNOT BE WRITTEN AS SQL",
],
)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.unmigrated_app",
],
)
def test_regression_22823_unmigrated_fk_to_migrated_model(self):
"""
Assuming you have 3 apps, `A`, `B`, and `C`, such that:
* `A` has migrations
* `B` has a migration we want to apply
* `C` has no migrations, but has an FK to `A`
When we try to migrate "B", an exception occurs because the
"B" was not included in the ProjectState that is used to detect
soft-applied migrations (#22823).
"""
call_command("migrate", "migrated_unapplied_app", verbosity=0)
# unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble',
# but that model is only defined in a migration, so the global app
# registry never sees it and the reference is left dangling. Remove it
# to avoid problems in subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app_syncdb"]
)
def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self):
"""
For an app without migrations, editor.execute() is used for executing
the syncdb deferred SQL.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, "execute") as execute:
call_command(
"migrate", run_syncdb=True, verbosity=1, stdout=stdout, no_color=True
)
create_table_count = len(
[call for call in execute.mock_calls if "CREATE TABLE" in str(call)]
)
self.assertEqual(create_table_count, 2)
# There's at least one deferred SQL for creating the foreign key
# index.
self.assertGreater(len(execute.mock_calls), 2)
stdout = stdout.getvalue()
self.assertIn("Synchronize unmigrated apps: unmigrated_app_syncdb", stdout)
self.assertIn("Creating tables...", stdout)
table_name = truncate_name(
"unmigrated_app_syncdb_classroom", connection.ops.max_name_length()
)
self.assertIn("Creating table %s" % table_name, stdout)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate_syncdb_app_with_migrations(self):
msg = "Can't use run_syncdb with app 'migrations' as it has migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", "migrations", run_syncdb=True, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.unmigrated_app_syncdb",
"migrations.migrations_test_apps.unmigrated_app_simple",
]
)
def test_migrate_syncdb_app_label(self):
"""
Running migrate --run-syncdb with an app_label only creates tables for
the specified app.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, "execute") as execute:
call_command(
"migrate", "unmigrated_app_syncdb", run_syncdb=True, stdout=stdout
)
create_table_count = len(
[call for call in execute.mock_calls if "CREATE TABLE" in str(call)]
)
self.assertEqual(create_table_count, 2)
self.assertGreater(len(execute.mock_calls), 2)
self.assertIn(
"Synchronize unmigrated app: unmigrated_app_syncdb", stdout.getvalue()
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_record_replaced(self):
"""
Running a single squashed migration should record all of the original
replaced migrations as run.
"""
recorder = MigrationRecorder(connection)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0001_initial"), applied_migrations)
self.assertIn(("migrations", "0002_second"), applied_migrations)
self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
        # Roll back changes.
call_command("migrate", "migrations", "zero", verbosity=0)
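    # Illustrative sketch, not part of the original suite: the recorded state
    # asserted above lives in the django_migrations table and can be queried
    # through MigrationRecorder directly.
    @staticmethod
    def _recorded_names(app_label):
        """Return recorded migration names for ``app_label``, sorted."""
        recorder = MigrationRecorder(connection)
        return sorted(
            name for app, name in recorder.applied_migrations() if app == app_label
        )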
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_record_squashed(self):
"""
        Running migrate for a squashed migration should record it as applied
        if all of the replaced migrations have been run (#25231).
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
out = io.StringIO()
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n"
" [-] 0001_squashed_0002 (2 squashed migrations) "
"run 'manage.py migrate' to finish recording.\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
self.assertIn(
("migrations", "0001_squashed_0002"), recorder.applied_migrations()
)
        # No changes were actually applied, so there is nothing to roll back.
def test_migrate_partially_applied_squashed_migration(self):
"""
Migrating to a squashed migration specified by name should succeed
even if it is partially applied.
"""
with self.temporary_migration_module(module="migrations.test_migrations"):
recorder = MigrationRecorder(connection)
try:
call_command("migrate", "migrations", "0001_initial", verbosity=0)
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=0,
)
call_command(
"migrate",
"migrations",
"0001_squashed_0002_second",
verbosity=0,
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0002_second"), applied_migrations)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_backward_to_squashed_migration(self):
try:
call_command("migrate", "migrations", "0001_squashed_0002", verbosity=0)
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
call_command("migrate", "migrations", "0001_initial", verbosity=0)
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_book")
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate_inconsistent_history(self):
"""
Running migrate with some migrations applied before their dependencies
should not be allowed.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial"
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("migrate")
applied_migrations = recorder.applied_migrations()
self.assertNotIn(("migrations", "0001_initial"), applied_migrations)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.migrated_app",
]
)
def test_migrate_not_reflected_changes(self):
class NewModel1(models.Model):
class Meta:
app_label = "migrated_app"
class NewModel2(models.Model):
class Meta:
app_label = "migrated_unapplied_app"
out = io.StringIO()
try:
call_command("migrate", verbosity=0)
call_command("migrate", stdout=out, no_color=True)
self.assertEqual(
"operations to perform:\n"
" apply all migrations: migrated_app, migrated_unapplied_app\n"
"running migrations:\n"
" no migrations to apply.\n"
" your models in app(s): 'migrated_app', "
"'migrated_unapplied_app' have changes that are not yet "
"reflected in a migration, and so won't be applied.\n"
" run 'manage.py makemigrations' to make new migrations, and "
"then re-run 'manage.py migrate' to apply them.\n",
out.getvalue().lower(),
)
finally:
# Unmigrate everything.
call_command("migrate", "migrated_app", "zero", verbosity=0)
call_command("migrate", "migrated_unapplied_app", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_squashed_no_replaces",
}
)
def test_migrate_prune(self):
"""
With prune=True, references to migration files deleted from the
migrations module (such as after being squashed) are removed from the
django_migrations table.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
recorder.record_applied("migrations", "0001_squashed_0002")
out = io.StringIO()
try:
call_command("migrate", "migrations", prune=True, stdout=out, no_color=True)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n"
" Pruning migrations.0001_initial OK\n"
" Pruning migrations.0002_second OK\n",
)
applied_migrations = [
migration
for migration in recorder.applied_migrations()
if migration[0] == "migrations"
]
self.assertEqual(applied_migrations, [("migrations", "0001_squashed_0002")])
finally:
recorder.record_unapplied("migrations", "0001_initial")
recorder.record_unapplied("migrations", "0001_second")
recorder.record_unapplied("migrations", "0001_squashed_0002")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_prune_deleted_squashed_migrations_in_replaces(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
) as migration_dir:
try:
call_command("migrate", "migrations", verbosity=0)
# Delete the replaced migrations.
os.remove(os.path.join(migration_dir, "0001_initial.py"))
os.remove(os.path.join(migration_dir, "0002_second.py"))
# --prune cannot be used before removing the "replaces"
# attribute.
call_command(
"migrate",
"migrations",
prune=True,
stdout=out,
no_color=True,
)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n"
" Cannot use --prune because the following squashed "
"migrations have their 'replaces' attributes and may not "
"be recorded as applied:\n"
" migrations.0001_squashed_0002\n"
" Re-run 'manage.py migrate' if they are not marked as "
"applied, and remove 'replaces' attributes in their "
"Migration classes.\n",
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_prune_no_migrations_to_prune(self):
out = io.StringIO()
call_command("migrate", "migrations", prune=True, stdout=out, no_color=True)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n No migrations to prune.\n",
)
out = io.StringIO()
call_command(
"migrate",
"migrations",
prune=True,
stdout=out,
no_color=True,
verbosity=0,
)
self.assertEqual(out.getvalue(), "")
def test_prune_no_app_label(self):
msg = "Migrations can be pruned only when an app is specified."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", prune=True)
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
def setUp(self):
super().setUp()
self._old_models = apps.app_configs["migrations"].models.copy()
def tearDown(self):
apps.app_configs["migrations"].models = self._old_models
apps.all_models["migrations"] = self._old_models
apps.clear_cache()
super().tearDown()
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model("migrations", UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
# Check for empty __init__.py file in migrations folder
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
with open(init_file) as fp:
content = fp.read()
self.assertEqual(content, "")
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding="utf-8") as fp:
content = fp.read()
self.assertIn("migrations.CreateModel", content)
self.assertIn("initial = True", content)
self.assertIn("úñí©óðé µóðéø", content) # Meta.verbose_name
self.assertIn("úñí©óðé µóðéøß", content) # Meta.verbose_name_plural
self.assertIn("ÚÑÍ¢ÓÐÉ", content) # title.verbose_name
self.assertIn("“Ðjáñgó”", content) # title.default
def test_makemigrations_order(self):
"""
makemigrations should recognize number-only migrations (0001.py).
"""
module = "migrations.test_migrations_order"
with self.temporary_migration_module(module=module) as migration_dir:
if hasattr(importlib, "invalidate_caches"):
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
importlib.invalidate_caches()
call_command(
"makemigrations", "migrations", "--empty", "-n", "a", "-v", "0"
)
self.assertTrue(os.path.exists(os.path.join(migration_dir, "0002_a.py")))
def test_makemigrations_empty_connections(self):
empty_connections = ConnectionHandler({"default": {}})
with mock.patch(
"django.core.management.commands.makemigrations.connections",
new=empty_connections,
):
# with no apps
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
# with an app
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
@override_settings(INSTALLED_APPS=["migrations", "migrations2"])
def test_makemigrations_consistency_checks_respect_routers(self):
"""
The history consistency checks in makemigrations respect
settings.DATABASE_ROUTERS.
"""
def patched_has_table(migration_recorder):
if migration_recorder.connection is connections["other"]:
raise Exception("Other connection")
else:
return mock.DEFAULT
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model("migrations", UnicodeModel)
with mock.patch.object(
MigrationRecorder, "has_table", autospec=True, side_effect=patched_has_table
) as has_table:
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
self.assertEqual(has_table.call_count, 1) # 'default' is checked
# Router says not to migrate 'other' so consistency shouldn't
# be checked.
with self.settings(DATABASE_ROUTERS=["migrations.routers.TestRouter"]):
call_command("makemigrations", "migrations", verbosity=0)
self.assertEqual(has_table.call_count, 2) # 'default' again
# With a router that doesn't prohibit migrating 'other',
# consistency is checked.
with self.settings(
DATABASE_ROUTERS=["migrations.routers.DefaultOtherRouter"]
):
with self.assertRaisesMessage(Exception, "Other connection"):
call_command("makemigrations", "migrations", verbosity=0)
self.assertEqual(has_table.call_count, 4) # 'default' and 'other'
# With a router that doesn't allow migrating on any database,
# no consistency checks are made.
with self.settings(DATABASE_ROUTERS=["migrations.routers.TestRouter"]):
with mock.patch.object(
TestRouter, "allow_migrate", return_value=False
) as allow_migrate:
call_command("makemigrations", "migrations", verbosity=0)
allow_migrate.assert_any_call(
"other", "migrations", model_name="UnicodeModel"
)
# allow_migrate() is called with the correct arguments.
self.assertGreater(len(allow_migrate.mock_calls), 0)
called_aliases = set()
for mock_call in allow_migrate.mock_calls:
_, call_args, call_kwargs = mock_call
connection_alias, app_name = call_args
called_aliases.add(connection_alias)
# Raises an error if invalid app_name/model_name occurs.
apps.get_app_config(app_name).get_model(call_kwargs["model_name"])
self.assertEqual(called_aliases, set(connections))
self.assertEqual(has_table.call_count, 4)
def test_failing_migration(self):
        # If a migration fails to serialize, it shouldn't generate an empty
        # file (#21280).
apps.register_model("migrations", UnserializableModel)
with self.temporary_migration_module() as migration_dir:
with self.assertRaisesMessage(ValueError, "Cannot serialize"):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
def test_makemigrations_conflict_exit(self):
"""
makemigrations exits if it detects a conflict.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
):
with self.assertRaises(CommandError) as context:
call_command("makemigrations")
self.assertEqual(
str(context.exception),
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (0002_conflicting_second, 0002_second in "
"migrations).\n"
"To fix them run 'python manage.py makemigrations --merge'",
)
def test_makemigrations_merge_no_conflict(self):
"""
        makemigrations exits cleanly in merge mode when there are no
        conflicts.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", merge=True, stdout=out)
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
        makemigrations raises an error if no app is specified with --empty.
"""
msg = "You must supply at least one app label when using --empty."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
makemigrations properly constructs an empty migration.
"""
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding="utf-8") as fp:
content = fp.read()
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(" ", "")
self.assertIn(
"dependencies=[]" if HAS_BLACK else "dependencies=[\n]", content
)
self.assertIn(
"operations=[]" if HAS_BLACK else "operations=[\n]", content
)
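    # Roughly, the generated empty migration has this shape (a sketch; the
    # exact formatting depends on whether Black is installed, which is what
    # the HAS_BLACK branches above accommodate):
    #
    #   class Migration(migrations.Migration):
    #       dependencies = []
    #       operations = []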
@override_settings(MIGRATION_MODULES={"migrations": None})
def test_makemigrations_disabled_migrations_for_app(self):
"""
makemigrations raises a nice error when migrations are disabled for an
app.
"""
msg = (
"Django can't create migrations for app 'migrations' because migrations "
"have been disabled via the MIGRATION_MODULES setting."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_no_changes_no_apps(self):
"""
makemigrations exits when there are no changes and no apps are specified.
"""
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
def test_makemigrations_no_changes(self):
"""
makemigrations exits when there are no changes to an app.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_no_apps_initial(self):
"""
        makemigrations should detect that an initial migration is needed for
        apps with empty migration modules when no app label is provided.
"""
out = io.StringIO()
        with self.temporary_migration_module(
            module="migrations.test_migrations_empty"
        ):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_no_init(self):
"""Migration directories without an __init__.py file are allowed."""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_init"
):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
makemigrations announces the migration at the default verbosity level.
"""
out = io.StringIO()
with self.temporary_migration_module():
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
def test_makemigrations_no_common_ancestor(self):
"""
makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
with self.temporary_migration_module(
module="migrations.test_migrations_no_ancestor"
):
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
def test_makemigrations_interactive_reject(self):
"""
makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
with mock.patch("builtins.input", mock.Mock(return_value="N")):
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
with captured_stdout():
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=True,
verbosity=0,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
def test_makemigrations_interactive_accept(self):
"""
makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_default_merge_name(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(
migration_dir,
"0003_merge_0002_conflicting_second_0002_second.py",
)
self.assertIs(os.path.exists(merge_file), True)
with open(merge_file, encoding="utf-8") as fp:
content = fp.read()
if HAS_BLACK:
target_str = '("migrations", "0002_conflicting_second")'
else:
target_str = "('migrations', '0002_conflicting_second')"
self.assertIn(target_str, content)
self.assertIn("Created new merge migration %s" % merge_file, out.getvalue())
@mock.patch("django.db.migrations.utils.datetime")
def test_makemigrations_auto_merge_name(self, mock_datetime):
mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4)
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict_long_name"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge_20160102_0304.py")
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_non_interactive_not_null_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
new not-null field.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_int = models.IntegerField()
class Meta:
app_label = "migrations"
with self.assertRaises(SystemExit):
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn(
"Field 'silly_int' on model 'sillymodel' not migrated: it is "
"impossible to add a non-nullable field without specifying a "
"default.",
out.getvalue(),
)
def test_makemigrations_interactive_not_null_addition(self):
"""
        makemigrations shows the expected prompts when adding a NOT NULL
        field in interactive mode.
"""
class Author(models.Model):
silly_field = models.BooleanField(null=False)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to add a non-nullable field 'silly_field' to "
"author without specifying a default. This is because the "
"database needs something to populate existing rows.\n"
"Please select a fix:\n"
" 1) Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)\n"
" 2) Quit and manually define a default value in models.py."
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# 2 - quit.
with mock.patch("builtins.input", return_value="2"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(input_msg, out.getvalue())
# 1 - provide a default.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
output = out.getvalue()
self.assertIn(input_msg, output)
self.assertIn("Please enter the default value as valid Python.", output)
self.assertIn(
"The datetime and django.utils.timezone modules are "
"available, so it is possible to provide e.g. timezone.now as "
"a value",
output,
)
self.assertIn("Type 'exit' to exit this prompt", output)
def test_makemigrations_non_interactive_not_null_alteration(self):
"""
Non-interactive makemigrations fails when a default is missing on a
field changed to not-null.
"""
class Author(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
age = models.IntegerField(default=0)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn("Alter field slug on author", out.getvalue())
self.assertIn(
"Field 'slug' on model 'author' given a default of NOT PROVIDED "
"and must be corrected.",
out.getvalue(),
)
def test_makemigrations_interactive_not_null_alteration(self):
"""
        makemigrations shows the expected prompts when changing a NULL field
        to NOT NULL in interactive mode.
"""
class Author(models.Model):
slug = models.SlugField(null=False)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to change a nullable field 'slug' on author to "
"non-nullable without providing a default. This is because the "
"database needs something to populate existing rows.\n"
"Please select a fix:\n"
" 1) Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)\n"
" 2) Ignore for now. Existing rows that contain NULL values will "
"have to be handled manually, for example with a RunPython or "
"RunSQL operation.\n"
" 3) Quit and manually define a default value in models.py."
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# No message appears if --dry-run.
with captured_stdout() as out:
call_command(
"makemigrations",
"migrations",
interactive=True,
dry_run=True,
)
self.assertNotIn(input_msg, out.getvalue())
# 3 - quit.
with mock.patch("builtins.input", return_value="3"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(input_msg, out.getvalue())
# 1 - provide a default.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
output = out.getvalue()
self.assertIn(input_msg, output)
self.assertIn("Please enter the default value as valid Python.", output)
self.assertIn(
"The datetime and django.utils.timezone modules are "
"available, so it is possible to provide e.g. timezone.now as "
"a value",
output,
)
self.assertIn("Type 'exit' to exit this prompt", output)
def test_makemigrations_non_interactive_no_model_rename(self):
"""
        In non-interactive mode, makemigrations emits a delete plus a create
        instead of detecting a possible model rename.
"""
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Delete model SillyModel", out.getvalue())
self.assertIn("Create model RenamedModel", out.getvalue())
def test_makemigrations_non_interactive_no_field_rename(self):
"""
        In non-interactive mode, makemigrations emits a remove plus an add
        instead of detecting a possible field rename.
"""
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Remove field silly_field from sillymodel", out.getvalue())
self.assertIn("Add field silly_rename to sillymodel", out.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_model_rename_interactive(self, mock_input):
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(
module="migrations.test_migrations_no_default",
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
self.assertIn("Rename model SillyModel to RenamedModel", out.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_field_rename_interactive(self, mock_input):
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(
module="migrations.test_migrations_no_default",
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(
"Rename field silly_field on sillymodel to silly_rename",
out.getvalue(),
)
def test_makemigrations_handle_merge(self):
"""
makemigrations properly merges the conflicting migrations with --noinput.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertTrue(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertIn("Created new merge migration", output)
def test_makemigration_merge_dry_run(self):
"""
makemigrations respects --dry-run option when fixing migration
conflicts (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
dry_run=True,
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
`makemigrations --merge --dry-run` writes the merge migration file to
stdout with `verbosity == 3` (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
dry_run=True,
merge=True,
interactive=False,
stdout=out,
verbosity=3,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
def test_makemigrations_dry_run(self):
"""
`makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
silly_auto_now = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
def test_makemigrations_dry_run_verbosity_3(self):
"""
Allow `makemigrations --dry-run` to output the migrations file to
stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command(
"makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3
)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
def test_makemigrations_scriptable(self):
"""
With scriptable=True, log output is diverted to stderr, and only the
paths of generated migration files are written to stdout.
"""
out = io.StringIO()
err = io.StringIO()
with self.temporary_migration_module(
module="migrations.migrations.test_migrations",
) as migration_dir:
call_command(
"makemigrations",
"migrations",
scriptable=True,
stdout=out,
stderr=err,
)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertEqual(out.getvalue(), f"{initial_file}\n")
self.assertIn(" - Create model ModelWithCustomBase\n", err.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_scriptable_merge(self, mock_input):
out = io.StringIO()
err = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict",
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
name="merge",
scriptable=True,
stdout=out,
stderr=err,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertEqual(out.getvalue(), f"{merge_file}\n")
self.assertIn(f"Created new merge migration {merge_file}", err.getvalue())
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
makemigrations creates migrations when specifying a custom location
for migration files using MIGRATION_MODULES if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar"
with self.temporary_migration_module(module=migration_module) as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
# Migrations file is actually created in the expected path.
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "some.nonexistent.path"})
def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self):
msg = (
"Could not locate an appropriate location to create migrations "
"package some.nonexistent.path. Make sure the toplevel package "
"exists and can be imported."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_interactive_by_default(self):
"""
The user is prompted to merge by default if there are conflicts and
        merge is True. Answering 'N' distinguishes this behavior from that of
        --noinput.
"""
# Monkeypatch interactive questioner to auto reject
out = io.StringIO()
with mock.patch("builtins.input", mock.Mock(return_value="N")):
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", merge=True, stdout=out
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict",
]
)
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
makemigrations does not raise a CommandError when an unspecified app
has conflicting migrations.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "migrations", merge=False, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict",
]
)
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
makemigrations does not create a merge for an unspecified app even if
it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
app_label="migrated_app"
) as migration_dir:
call_command(
"makemigrations",
"migrated_app",
name="merge",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.conflicting_app_with_dependencies",
]
)
def test_makemigrations_merge_dont_output_dependency_operations(self):
"""
        makemigrations --merge does not output any operations from apps other
        than the one being merged.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="N")):
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"makemigrations",
"conflicting_app_with_dependencies",
merge=True,
interactive=True,
stdout=out,
)
self.assertEqual(
out.getvalue().lower(),
"merging conflicting_app_with_dependencies\n"
" branch 0002_conflicting_second\n"
" - create model something\n"
" branch 0002_second\n"
" - delete model tribble\n"
" - remove field silly_field from author\n"
" - add field rating to author\n"
" - create model book\n"
"\n"
"merging will only work if the operations printed above do not "
"conflict\n"
"with each other (working on different fields or models)\n"
"should these migration branches be merged? [y/n] ",
)
def test_makemigrations_with_custom_name(self):
"""
        makemigrations --name generates a custom migration name.
"""
with self.temporary_migration_module() as migration_dir:
def cmd(migration_count, migration_name, *args):
call_command(
"makemigrations",
"migrations",
"--verbosity",
"0",
"--name",
migration_name,
*args,
)
migration_file = os.path.join(
migration_dir, "%s_%s.py" % (migration_count, migration_name)
)
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with open(migration_file, encoding="utf-8") as fp:
content = fp.read()
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn(
"dependencies=[]" if HAS_BLACK else "dependencies=[\n]", content
)
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
if HAS_BLACK:
template_str = 'dependencies=[\n("migrations","0001_%s"),\n]'
else:
template_str = "dependencies=[\n('migrations','0001_%s'),\n]"
self.assertIn(
template_str % migration_name_0001,
content,
)
self.assertIn("operations=[]" if HAS_BLACK else "operations=[\n]", content)
def test_makemigrations_with_invalid_custom_name(self):
msg = "The migration name must be a valid Python identifier."
with self.assertRaisesMessage(CommandError, msg):
call_command(
"makemigrations", "migrations", "--name", "invalid name", "--empty"
)
def test_makemigrations_check(self):
"""
makemigrations --check should exit with a non-zero status when
there are changes to an app requiring migrations.
"""
with self.temporary_migration_module():
with self.assertRaises(SystemExit):
call_command("makemigrations", "--check", "migrations", verbosity=0)
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "--check", "migrations", verbosity=0)
def test_makemigrations_migration_path_output(self):
"""
makemigrations should print the relative paths to the migrations unless
they are outside of the current tree, in which case the absolute path
should be shown.
"""
out = io.StringIO()
apps.register_model("migrations", UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(
os.path.join(migration_dir, "0001_initial.py"), out.getvalue()
)
def test_makemigrations_migration_path_output_valueerror(self):
"""
makemigrations prints the absolute path if os.path.relpath() raises a
ValueError when it's impossible to obtain a relative path, e.g. on
Windows if Django is installed on a different drive than where the
migration files are created.
"""
out = io.StringIO()
with self.temporary_migration_module() as migration_dir:
with mock.patch("os.path.relpath", side_effect=ValueError):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(os.path.join(migration_dir, "0001_initial.py"), out.getvalue())
def test_makemigrations_inconsistent_history(self):
"""
        makemigrations should raise an InconsistentMigrationHistory exception
        if any migrations are applied before their dependencies.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial"
)
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("makemigrations")
def test_makemigrations_inconsistent_history_db_failure(self):
msg = (
"Got an error checking a consistent migration history performed "
"for database connection 'default': could not connect to server"
)
with mock.patch(
"django.db.migrations.loader.MigrationLoader.check_consistent_history",
side_effect=OperationalError("could not connect to server"),
):
with self.temporary_migration_module():
with self.assertWarns(RuntimeWarning) as cm:
call_command("makemigrations", verbosity=0)
self.assertEqual(str(cm.warning), msg)
@mock.patch("builtins.input", return_value="1")
@mock.patch(
"django.db.migrations.questioner.sys.stdin",
mock.MagicMock(encoding=sys.getdefaultencoding()),
)
def test_makemigrations_auto_now_add_interactive(self, *args):
"""
makemigrations prompts the user when adding auto_now_add to an existing
model.
"""
class Entry(models.Model):
title = models.CharField(max_length=255)
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to add the field 'creation_date' with "
"'auto_now_add=True' to entry without providing a default. This "
"is because the database needs something to populate existing "
"rows.\n"
" 1) Provide a one-off default now which will be set on all "
"existing rows\n"
" 2) Quit and manually define a default value in models.py."
)
        # input() is mocked above to answer "1" at every prompt.
prompt_stdout = io.StringIO()
with self.temporary_migration_module(module="migrations.test_auto_now_add"):
call_command(
"makemigrations", "migrations", interactive=True, stdout=prompt_stdout
)
prompt_output = prompt_stdout.getvalue()
self.assertIn(input_msg, prompt_output)
self.assertIn("Please enter the default value as valid Python.", prompt_output)
self.assertIn(
"Accept the default 'timezone.now' by pressing 'Enter' or provide "
"another value.",
prompt_output,
)
self.assertIn("Type 'exit' to exit this prompt", prompt_output)
self.assertIn("Add field creation_date to entry", prompt_output)
@mock.patch("builtins.input", return_value="2")
def test_makemigrations_auto_now_add_interactive_quit(self, mock_input):
class Author(models.Model):
publishing_date = models.DateField(auto_now_add=True)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout():
with self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
def test_makemigrations_non_interactive_auto_now_add_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
new field when auto_now_add=True.
"""
class Entry(models.Model):
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_auto_now_add"):
with self.assertRaises(SystemExit), captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn(
"Field 'creation_date' on model 'entry' not migrated: it is "
"impossible to add a field with 'auto_now_add=True' without "
"specifying a default.",
out.getvalue(),
)
def test_makemigrations_interactive_unique_callable_default_addition(self):
"""
makemigrations prompts the user when adding a unique field with
a callable default.
"""
class Book(models.Model):
created = models.DateTimeField(unique=True, default=timezone.now)
class Meta:
app_label = "migrations"
version = get_docs_version()
input_msg = (
f"Callable default on unique field book.created will not generate "
f"unique values upon migrating.\n"
f"Please choose how to proceed:\n"
f" 1) Continue making this migration as the first step in writing "
f"a manual migration to generate unique values described here: "
f"https://docs.djangoproject.com/en/{version}/howto/"
f"writing-migrations/#migrations-that-add-unique-fields.\n"
f" 2) Quit and edit field options in models.py.\n"
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# 2 - quit.
with mock.patch("builtins.input", return_value="2"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
out_value = out.getvalue()
self.assertIn(input_msg, out_value)
self.assertNotIn("Add field created to book", out_value)
# 1 - continue.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
out_value = out.getvalue()
self.assertIn(input_msg, out_value)
self.assertIn("Add field created to book", out_value)
def test_makemigrations_non_interactive_unique_callable_default_addition(self):
class Book(models.Model):
created = models.DateTimeField(unique=True, default=timezone.now)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
out_value = out.getvalue()
self.assertIn("Add field created to book", out_value)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"},
)
def test_makemigrations_continues_number_sequence_after_squash(self):
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
):
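            # The squashed migration replaces 0001 and 0002, so the next
            # (empty) migration is numbered 0003.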
with captured_stdout() as out:
call_command(
"makemigrations",
"migrations",
interactive=False,
empty=True,
)
out_value = out.getvalue()
self.assertIn("0003_auto", out_value)
def test_makemigrations_update(self):
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
migration_file = os.path.join(migration_dir, "0002_second.py")
with open(migration_file) as fp:
initial_content = fp.read()
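            # --update folds the detected changes into the latest migration:
            # the old file is deleted and a renamed replacement is written.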
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
self.assertFalse(
any(
filename.startswith("0003")
for filename in os.listdir(migration_dir)
)
)
self.assertIs(os.path.exists(migration_file), False)
new_migration_file = os.path.join(
migration_dir,
"0002_delete_tribble_author_rating_modelwithcustombase_and_more.py",
)
with open(new_migration_file) as fp:
self.assertNotEqual(initial_content, fp.read())
self.assertIn(f"Deleted {migration_file}", out.getvalue())
def test_makemigrations_update_existing_name(self):
with self.temporary_migration_module(
module="migrations.test_auto_now_add"
) as migration_dir:
migration_file = os.path.join(migration_dir, "0001_initial.py")
with open(migration_file) as fp:
initial_content = fp.read()
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
self.assertIs(os.path.exists(migration_file), False)
new_migration_file = os.path.join(
migration_dir,
"0001_initial_updated.py",
)
with open(new_migration_file) as fp:
self.assertNotEqual(initial_content, fp.read())
self.assertIn(f"Deleted {migration_file}", out.getvalue())
def test_makemigrations_update_applied_migration(self):
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
with self.temporary_migration_module(module="migrations.test_migrations"):
msg = "Cannot update applied migration 'migrations.0002_second'."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_no_migration(self):
        with self.temporary_migration_module(
            module="migrations.test_migrations_empty"
        ):
msg = "App migrations has no migration, cannot update last migration."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_squash_migration(self):
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
):
msg = "Cannot update squash migration 'migrations.0001_squashed_0002'."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_manual_porting(self):
with self.temporary_migration_module(
module="migrations.test_migrations_plan"
) as migration_dir:
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
# Previous migration exists.
previous_migration_file = os.path.join(migration_dir, "0005_fifth.py")
self.assertIs(os.path.exists(previous_migration_file), True)
# New updated migration exists.
files = [f for f in os.listdir(migration_dir) if f.startswith("0005_auto")]
updated_migration_file = os.path.join(migration_dir, files[0])
self.assertIs(os.path.exists(updated_migration_file), True)
self.assertIn(
f"Updated migration {updated_migration_file} requires manual porting.\n"
f"Previous migration {previous_migration_file} was kept and must be "
f"deleted after porting functions manually.",
out.getvalue(),
)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_makemigrations_update_dependency_migration(self):
with self.temporary_migration_module(app_label="book_app"):
msg = (
"Cannot update migration 'book_app.0001_initial' that migrations "
"'author_app.0002_alter_id' depend on."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "book_app", update=True)
class SquashMigrationsTests(MigrationTestBase):
"""
Tests running the squashmigrations command.
"""
def test_squashmigrations_squashes(self):
"""
squashmigrations squashes migrations.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
stdout=out,
no_color=True,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_squashed_0002_second.py"
)
self.assertTrue(os.path.exists(squashed_migration_file))
self.assertEqual(
out.getvalue(),
"Will squash the following migrations:\n"
" - 0001_initial\n"
" - 0002_second\n"
"Optimizing...\n"
" Optimized from 8 operations to 2 operations.\n"
"Created new squashed migration %s\n"
" You should commit this migration but leave the old ones in place;\n"
" the new migration will be used for new installs. Once you are sure\n"
" all instances of the codebase have applied the migrations you "
"squashed,\n"
" you can delete them.\n" % squashed_migration_file,
)
def test_squashmigrations_initial_attribute(self):
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations", "migrations", "0002", interactive=False, verbosity=0
)
squashed_migration_file = os.path.join(
migration_dir, "0001_squashed_0002_second.py"
)
with open(squashed_migration_file, encoding="utf-8") as fp:
content = fp.read()
self.assertIn("initial = True", content)
def test_squashmigrations_optimizes(self):
"""
squashmigrations optimizes operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=1,
stdout=out,
)
self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue())
def test_ticket_23799_squashmigrations_no_optimize(self):
"""
squashmigrations --no-optimize doesn't optimize operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=1,
no_optimize=True,
stdout=out,
)
self.assertIn("Skipping optimization", out.getvalue())
def test_squashmigrations_valid_start(self):
"""
squashmigrations accepts a starting migration.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
"0003",
interactive=False,
verbosity=1,
stdout=out,
)
squashed_migration_file = os.path.join(
migration_dir, "0002_second_squashed_0003_third.py"
)
with open(squashed_migration_file, encoding="utf-8") as fp:
content = fp.read()
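        # Squashing from 0002 leaves 0001_initial as a dependency; the quoting
        # style depends on whether black reformatted the generated file.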
if HAS_BLACK:
test_str = ' ("migrations", "0001_initial")'
else:
test_str = " ('migrations', '0001_initial')"
self.assertIn(test_str, content)
self.assertNotIn("initial = True", content)
out = out.getvalue()
self.assertNotIn(" - 0001_initial", out)
self.assertIn(" - 0002_second", out)
self.assertIn(" - 0003_third", out)
def test_squashmigrations_invalid_start(self):
"""
squashmigrations doesn't accept a starting migration after the ending migration.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
msg = (
"The migration 'migrations.0003_third' cannot be found. Maybe "
"it comes after the migration 'migrations.0002_second'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command(
"squashmigrations",
"migrations",
"0003",
"0002",
interactive=False,
verbosity=0,
)
def test_squashed_name_with_start_migration_name(self):
"""--squashed-name specifies the new migration's name."""
squashed_name = "squashed_name"
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0001",
"0002",
squashed_name=squashed_name,
interactive=False,
verbosity=0,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_%s.py" % squashed_name
)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_without_start_migration_name(self):
"""--squashed-name also works if a start migration is omitted."""
squashed_name = "squashed_name"
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0001",
squashed_name=squashed_name,
interactive=False,
verbosity=0,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_%s.py" % squashed_name
)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_exists(self):
msg = "Migration 0001_initial already exists. Use a different name."
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(CommandError, msg):
call_command(
"squashmigrations",
"migrations",
"0001",
"0002",
squashed_name="initial",
interactive=False,
verbosity=0,
)
def test_squashmigrations_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting",
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
stdout=out,
no_color=True,
)
squashed_migration_file = os.path.join(
migration_dir,
"0001_squashed_0002_second.py",
)
self.assertTrue(os.path.exists(squashed_migration_file))
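        # When black is available, the command tries to format the squashed
        # file and warns that it couldn't (this migration needs manual porting).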
black_warning = ""
if HAS_BLACK:
black_warning = (
"Squashed migration couldn't be formatted using the "
'"black" command. You can call it manually.\n'
)
self.assertEqual(
out.getvalue(),
f"Will squash the following migrations:\n"
f" - 0001_initial\n"
f" - 0002_second\n"
f"Optimizing...\n"
f" No optimizations possible.\n"
f"Created new squashed migration {squashed_migration_file}\n"
f" You should commit this migration but leave the old ones in place;\n"
f" the new migration will be used for new installs. Once you are sure\n"
f" all instances of the codebase have applied the migrations you "
f"squashed,\n"
f" you can delete them.\n"
f"Manual porting required\n"
f" Your migrations contained functions that must be manually copied "
f"over,\n"
f" as we could not safely copy their implementation.\n"
f" See the comment at the top of the squashed migration for details.\n"
+ black_warning,
)
class AppLabelErrorTests(TestCase):
"""
This class inherits TestCase because MigrationTestBase uses
`available_apps = ['migrations']` which means that it's the only installed
app. 'django.contrib.auth' must be in INSTALLED_APPS for some of these
tests.
"""
nonexistent_app_error = "No installed app with label 'nonexistent_app'."
did_you_mean_auth_error = (
"No installed app with label 'django.contrib.auth'. Did you mean 'auth'?"
)
def test_makemigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "nonexistent_app", stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_makemigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "django.contrib.auth", stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_migrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("migrate", "nonexistent_app")
def test_migrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("migrate", "django.contrib.auth")
def test_showmigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("showmigrations", "nonexistent_app", stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_showmigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("showmigrations", "django.contrib.auth", stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_sqlmigrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("sqlmigrate", "nonexistent_app", "0002")
def test_sqlmigrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("sqlmigrate", "django.contrib.auth", "0002")
def test_squashmigrations_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("squashmigrations", "nonexistent_app", "0002")
def test_squashmigrations_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("squashmigrations", "django.contrib.auth", "0002")
def test_optimizemigration_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("optimizemigration", "nonexistent_app", "0002")
def test_optimizemigration_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("optimizemigration", "django.contrib.auth", "0002")
class OptimizeMigrationTests(MigrationTestBase):
def test_no_optimization_possible(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0002", stdout=out, no_color=True
)
migration_file = os.path.join(migration_dir, "0002_second.py")
self.assertTrue(os.path.exists(migration_file))
call_command(
"optimizemigration",
"migrations",
"0002",
stdout=out,
no_color=True,
verbosity=0,
)
self.assertEqual(out.getvalue(), "No optimizations possible.\n")
def test_optimization(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0001", stdout=out, no_color=True
)
initial_migration_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_migration_file))
with open(initial_migration_file) as fp:
content = fp.read()
self.assertIn(
'("bool", models.BooleanField'
if HAS_BLACK
else "('bool', models.BooleanField",
content,
)
self.assertEqual(
out.getvalue(),
f"Optimizing from 4 operations to 2 operations.\n"
f"Optimized migration {initial_migration_file}\n",
)
def test_optimization_no_verbosity(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration",
"migrations",
"0001",
stdout=out,
no_color=True,
verbosity=0,
)
initial_migration_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_migration_file))
with open(initial_migration_file) as fp:
content = fp.read()
self.assertIn(
'("bool", models.BooleanField'
if HAS_BLACK
else "('bool', models.BooleanField",
content,
)
self.assertEqual(out.getvalue(), "")
def test_creates_replace_migration_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0003", stdout=out, no_color=True
)
optimized_migration_file = os.path.join(
migration_dir, "0003_third_optimized.py"
)
self.assertTrue(os.path.exists(optimized_migration_file))
with open(optimized_migration_file) as fp:
content = fp.read()
self.assertIn("replaces = [", content)
black_warning = ""
if HAS_BLACK:
black_warning = (
"Optimized migration couldn't be formatted using the "
'"black" command. You can call it manually.\n'
)
self.assertEqual(
out.getvalue(),
"Optimizing from 3 operations to 2 operations.\n"
"Manual porting required\n"
" Your migrations contained functions that must be manually copied over,\n"
" as we could not safely copy their implementation.\n"
" See the comment at the top of the optimized migration for details.\n"
+ black_warning
+ f"Optimized migration {optimized_migration_file}\n",
)
def test_fails_squash_migration_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting"
) as migration_dir:
version = get_docs_version()
msg = (
f"Migration will require manual porting but is already a squashed "
f"migration.\nTransition to a normal migration first: "
f"https://docs.djangoproject.com/en/{version}/topics/migrations/"
f"#squashing-migrations"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "0004", stdout=out)
optimized_migration_file = os.path.join(
migration_dir, "0004_fourth_optimized.py"
)
self.assertFalse(os.path.exists(optimized_migration_file))
self.assertEqual(
out.getvalue(), "Optimizing from 3 operations to 2 operations.\n"
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_optimizemigration_check(self):
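        # 0001 can be optimized, so --check exits with an error; 0002 cannot
        # be optimized, so the second call passes.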
with self.assertRaises(SystemExit):
call_command(
"optimizemigration", "--check", "migrations", "0001", verbosity=0
)
call_command("optimizemigration", "--check", "migrations", "0002", verbosity=0)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app_simple"],
)
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_simple' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "unmigrated_app_simple", "0001")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_clashing_prefix"},
)
    def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "a")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "nonexistent")
from django.core.exceptions import FieldDoesNotExist
from django.db import IntegrityError, connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.operations.fields import FieldOperation
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.functions import Abs
from django.db.transaction import atomic
from django.test import (
SimpleTestCase,
ignore_warnings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from django.utils.deprecation import RemovedInDjango51Warning
from .models import FoodManager, FoodQuerySet, UnicodeModel
from .test_base import OperationTestBase
class Mixin:
pass
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
    Each test checks the operation's state change and then its database
    alteration, both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures
here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
self.assertEqual(operation.migration_name_fragment, "pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["fields", "name"])
# And default manager not in set
operation = migrations.CreateModel(
"Foo", fields=[], managers=[("objects", models.Manager())]
)
definition = operation.deconstruct()
self.assertNotIn("managers", definition[2])
def test_create_model_with_duplicate_field_name(self):
with self.assertRaisesMessage(
ValueError, "Found duplicate value pink in CreateModel fields argument."
):
migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.TextField()),
("pink", models.IntegerField(default=1)),
],
)
def test_create_model_with_duplicate_base(self):
message = "Found duplicate value test_crmo.pony in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.Pony",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.pony",
),
)
message = (
"Found duplicate value migrations.unicodemodel in CreateModel bases "
"argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
UnicodeModel,
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.unicodemodel",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.UnicodeModel",
),
)
message = (
"Found duplicate value <class 'django.db.models.base.Model'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
models.Model,
models.Model,
),
)
message = (
"Found duplicate value <class 'migrations.test_operations.Mixin'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
Mixin,
Mixin,
),
)
def test_create_model_with_duplicate_manager_name(self):
with self.assertRaisesMessage(
ValueError,
"Found duplicate value objects in CreateModel managers argument.",
):
migrations.CreateModel(
"Pony",
fields=[],
managers=[
("objects", models.Manager()),
("objects", models.Manager()),
],
)
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards(
"test_crmoua", editor, project_state, new_state
)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables")),
],
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmomm", editor, new_state, project_state
)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_create_fk_models_to_pk_field_db_collation(self):
"""Creation of models with a FK to a PK with db_collation."""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_cfkmtopkfdbc"
operations = [
migrations.CreateModel(
"Pony",
[
(
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
),
],
)
]
project_state = self.apply_operations(app_label, ProjectState(), operations)
# ForeignKey.
new_state = project_state.clone()
operation = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
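        # The FK column is created with the same collation as the referenced
        # primary key.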
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# OneToOneField.
new_state = project_state.clone()
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony",
models.OneToOneField("Pony", models.CASCADE, primary_key=True),
),
("cuteness", models.IntegerField(default=1)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_shetlandpony", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony_ptr",
models.OneToOneField(
"test_crmoih.Pony",
models.CASCADE,
auto_created=True,
primary_key=True,
to_field="id",
serialize=False,
),
),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmoih", editor, new_state, project_state
)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crprmo", editor, new_state, project_state
)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crummo", editor, new_state, project_state
)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_constraint(self):
where = models.Q(pink__gt=2)
check_constraint = models.CheckConstraint(
check=where, name="test_constraint_pony_pink_gt_2"
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [check_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2]["options"]["constraints"], [check_constraint])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_boolean_expression_in_check_constraint(self):
app_label = "test_crmobechc"
rawsql_constraint = models.CheckConstraint(
check=models.expressions.RawSQL(
"price < %s", (1000,), output_field=models.BooleanField()
),
name=f"{app_label}_price_lt_1000_raw",
)
wrapper_constraint = models.CheckConstraint(
check=models.expressions.ExpressionWrapper(
models.Q(price__gt=500) | models.Q(price__lt=500),
output_field=models.BooleanField(),
),
name=f"{app_label}_price_neq_500_wrap",
)
operation = migrations.CreateModel(
"Product",
[
("id", models.AutoField(primary_key=True)),
("price", models.IntegerField(null=True)),
],
options={"constraints": [rawsql_constraint, wrapper_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Add table.
        self.assertTableNotExists(f"{app_label}_product")
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertTableExists(f"{app_label}_product")
insert_sql = f"INSERT INTO {app_label}_product (id, price) VALUES (%d, %d)"
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (1, 1000))
cursor.execute(insert_sql % (1, 999))
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (2, 500))
cursor.execute(insert_sql % (2, 499))
def test_create_model_with_partial_unique_constraint(self):
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [partial_unique_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# Test constraint works
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"], [partial_unique_constraint]
)
def test_create_model_with_deferred_unique_constraint(self):
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferrable_pink_constraint",
deferrable=models.Deferrable.DEFERRED,
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [deferred_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1)
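        # A DEFERRED constraint tolerates a duplicate within a transaction as
        # long as it's resolved before commit.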
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[deferred_unique_constraint],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_create_model_with_covering_unique_constraint(self):
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
include=["weight"],
name="test_constraint_pony_pink_covering_weight",
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [covering_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[covering_unique_constraint],
)
def test_create_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
self.assertEqual(operation.migration_name_fragment, "delete_pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])
def test_delete_proxy_model(self):
"""
        Tests that the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlprmo", editor, new_state, project_state
)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
def test_delete_mti_model(self):
project_state = self.set_up_test_model("test_dlmtimo", mti_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ShetlandPony")
new_state = project_state.clone()
operation.state_forwards("test_dlmtimo", new_state)
self.assertIn(("test_dlmtimo", "shetlandpony"), project_state.models)
self.assertNotIn(("test_dlmtimo", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_dlmtimo", editor, project_state, new_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableNotExists("test_dlmtimo_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlmtimo", editor, new_state, project_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
self.assertEqual(operation.migration_name_fragment, "rename_pony_horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate forwards
new_state = project_state.clone()
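        # Some backends can't rename a referenced table inside an atomic
        # block, hence the feature flag.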
atomic_rename = connection.features.supports_atomic_references_rename
new_state = self.apply_operations(
"test_rnmo", new_state, [operation], atomic=atomic_rename
)
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
new_state.models["test_rnmo", "rider"].fields["pony"].remote_field.model,
"test_rnmo.Horse",
)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate backwards
original_state = self.unapply_operations(
"test_rnmo", project_state, [operation], atomic=atomic_rename
)
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual(
original_state.models["test_rnmo", "rider"]
.fields["pony"]
.remote_field.model,
"Pony",
)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"old_name": "Pony", "new_name": "Horse"})
def test_rename_model_state_forwards(self):
"""
        RenameModel operations shouldn't trigger rendering (caching) of the
        state's apps when they weren't rendered beforehand.
"""
state = ProjectState()
state.add_model(ModelState("migrations", "Foo", []))
operation = migrations.RenameModel("Foo", "Bar")
operation.state_forwards("migrations", state)
self.assertNotIn("apps", state.__dict__)
self.assertNotIn(("migrations", "foo"), state.models)
self.assertIn(("migrations", "bar"), state.models)
# Now with apps cached.
apps = state.apps
operation = migrations.RenameModel("Bar", "Foo")
operation.state_forwards("migrations", state)
self.assertIs(state.apps, apps)
self.assertNotIn(("migrations", "bar"), state.models)
self.assertIn(("migrations", "foo"), state.models)
def test_rename_model_with_self_referential_fk(self):
"""
        Tests the RenameModel operation on a model with a self-referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
"self",
new_state.models["test_rmwsrf", "horserider"]
.fields["friend"]
.remote_field.model,
)
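        # The self-referential FK's reverse relation is addressable by its
        # default query name, which follows the rename.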
HorseRider = new_state.apps.get_model("test_rmwsrf", "horserider")
self.assertIs(
HorseRider._meta.get_field("horserider").remote_field.model, HorseRider
)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKExists(
"test_rmwsrf_horserider",
["friend_id"],
("test_rmwsrf_horserider", "id"),
)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(
"test_rmwsrf", editor, new_state, project_state
)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)
def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model(
"test_rmwsc", related_model=True, mti_model=True
)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(
operation.describe(), "Rename model ShetlandPony to LittleHorse"
)
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"]
.fields["pony"]
.remote_field.model,
new_state.models["test_rmwsc", "rider"].fields["pony"].remote_field.model,
)
# Before running the migration we have a table for Shetland Pony, not
# Little Horse.
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")
)
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# but the Foreign keys still point at pony, not little horse
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")
)
def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"ReflexivePony",
fields=[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("self")),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)
def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Pony", "Pony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)
def test_rename_model_with_db_table_noop(self):
app_label = "test_rmwdbtn"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
options={"db_table": "rider"},
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameModel("Rider", "Runner")
operation.state_forwards(app_label, new_state)
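        # The explicit db_table ("rider") is unchanged by the model rename,
        # so neither direction should issue any schema-altering queries.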
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
app_label, editor, new_state, project_state
)
def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Rider", "Rider2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)
def test_rename_m2m_through_model(self):
app_label = "test_rename_through"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"test_rename_through.Rider", models.CASCADE
),
),
(
"pony",
models.ForeignKey(
"test_rename_through.Pony", models.CASCADE
),
),
],
),
migrations.AddField(
"Pony",
"riders",
models.ManyToManyField(
"test_rename_through.Rider",
through="test_rename_through.PonyRider",
),
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
pony = Pony.objects.create()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("PonyRider", "PonyRider2"),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
pony = Pony.objects.first()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
self.assertEqual(Pony.objects.count(), 1)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(PonyRider.objects.count(), 2)
self.assertEqual(pony.riders.count(), 2)
def test_rename_m2m_model_after_rename_field(self):
"""RenameModel renames a many-to-many column after a RenameField."""
app_label = "test_rename_multiple"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=20)),
],
),
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"test_rename_multiple.Pony", models.CASCADE
),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
migrations.RenameField(
model_name="pony", old_name="name", new_name="fancy_name"
),
migrations.RenameModel(old_name="Rider", new_name="Jockey"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Jockey = project_state.apps.get_model(app_label, "Jockey")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
# No "no such column" error means the column was renamed correctly.
pony = Pony.objects.create(fancy_name="a good name")
jockey = Jockey.objects.create(pony=pony)
ponyrider = PonyRider.objects.create()
ponyrider.riders.add(jockey)
def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
self.assertEqual(operation.migration_name_fragment, "pony_height")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = new_state.models["test_adfl", "pony"].fields["height"]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adchfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adtxtfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adbinfl",
project_state,
[
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
# SQLite returns buffer/memoryview, cast to bytes for checking.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_regr22168", editor, project_state, new_state
)
self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = new_state.models["test_adflpd", "pony"].fields["height"]
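        # With preserve_default=False the default is only used to populate
        # existing rows during the migration; it isn't kept in the state.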
self.assertEqual(field.default, models.NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["field", "model_name", "name", "preserve_default"]
)
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")
)
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adflmm", editor, new_state, project_state
)
self.assertTableNotExists("test_adflmm_pony_stables")
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field("stables").blank)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"stables",
models.ManyToManyField(
to="Stable", related_name="ponies", blank=True
),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field("stables").blank)
def test_repoint_field_m2m(self):
project_state = self.set_up_test_model(
"test_alflmm", second_model=True, third_model=True
)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"places",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"places",
models.ManyToManyField(to="Van", related_name="ponies"),
)
],
)
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()
def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations(
"test_rmflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations(
"test_rmflmm", project_state, operations=operations
)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations(
"test_rmflmmwt",
project_state,
operations=[
migrations.CreateModel(
"PonyStables",
fields=[
(
"pony",
models.ForeignKey("test_rmflmmwt.Pony", models.CASCADE),
),
(
"stable",
models.ForeignKey("test_rmflmmwt.Stable", models.CASCADE),
),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField(
"Stable",
related_name="ponies",
through="test_rmflmmwt.PonyStables",
),
),
],
)
self.assertTableExists("test_rmflmmwt_ponystables")
operations = [
migrations.RemoveField("Pony", "stables"),
migrations.DeleteModel("PonyStables"),
]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
self.assertEqual(operation.migration_name_fragment, "remove_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pink"})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(
operation.describe(), "Rename table for Pony to test_almota_pony_2"
)
self.assertEqual(operation.migration_name_fragment, "alter_pony_table")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony_2",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "table": "test_almota_pony_2"})
def test_alter_model_table_none(self):
"""
Tests the AlterModelTable operation if the table name is set to None.
"""
operation = migrations.AlterModelTable("Pony", None)
self.assertEqual(operation.describe(), "Rename table for Pony to (default)")
def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = "pony_foo"
project_state = self.set_up_test_model(
app_label, second_model=True, db_table=pony_db_table
)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable")
)
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
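        # Auto-created M2M tables are named after the parent model's db_table
        # plus the field name.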
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
operation = migrations.AlterModelTable(name="pony", table=None)
operation.state_forwards(app_label, second_state)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
self.assertEqual(operation.describe(), "Alter field pink on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertIs(
project_state.models["test_alfl", "pony"].fields["pink"].null, False
)
self.assertIs(new_state.models["test_alfl", "pony"].fields["pink"].null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_add_db_column_noop(self):
"""
AlterField operation is a noop when adding only a db_column and the
column name is not changed.
"""
app_label = "test_afadbn"
project_state = self.set_up_test_model(app_label, related_model=True)
pony_table = "%s_pony" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Pony", "weight", models.FloatField(db_column="weight")
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "pony"].fields["weight"].db_column,
)
self.assertEqual(
new_state.models[app_label, "pony"].fields["weight"].db_column,
"weight",
)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
app_label, editor, new_state, project_state
)
self.assertColumnExists(pony_table, "weight")
rider_table = "%s_rider" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Rider",
"pony",
models.ForeignKey("Pony", models.CASCADE, db_column="pony_id"),
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "rider"].fields["pony"].db_column,
)
        self.assertEqual(
            new_state.models[app_label, "rider"].fields["pony"].db_column,
            "pony_id",
        )
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
                operation.database_backwards(
                    app_label, editor, new_state, project_state
                )
self.assertColumnExists(rider_table, "pony_id")
def test_alter_field_pk(self):
"""
The AlterField operation on primary keys (things like PostgreSQL's
SERIAL weirdness).
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.IntegerField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(
project_state.models["test_alflpk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpk", "pony"].fields["id"],
models.IntegerField,
)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpk", editor, new_state, project_state
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_pk_fk(self):
"""
        The AlterField operation on a primary key changes any FKs pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
project_state = self.apply_operations(
"test_alflpkfk",
project_state,
[
migrations.CreateModel(
"Stable",
fields=[
("ponies", models.ManyToManyField("Pony")),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable"),
),
],
)
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.FloatField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(
project_state.models["test_alflpkfk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpkfk", "pony"].fields["id"],
models.FloatField,
)
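        # Introspect the PK column and every FK/M2M column pointing at it;
        # after AlterField they must all share the same type and nullability.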
def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_pony"
)
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_rider"
)
if c.name == "pony_id"
][0]
m2m_fk_type, m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_pony_stables",
)
if c.name == "pony_id"
][0]
remote_m2m_fk_type, remote_m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_stable_ponies",
)
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_type, m2m_fk_type)
self.assertEqual(id_type, remote_m2m_fk_type)
self.assertEqual(id_null, fk_null)
self.assertEqual(id_null, m2m_fk_null)
self.assertEqual(id_null, remote_m2m_fk_null)
assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alflpkfk", editor, project_state, new_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpkfk", editor, new_state, project_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_alter_field_pk_fk_db_collation(self):
"""
AlterField operation of db_collation on primary keys changes any FKs
pointing to it.
"""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_alflpkfkdbc"
project_state = self.apply_operations(
app_label,
ProjectState(),
[
migrations.CreateModel(
"Pony",
[
("id", models.CharField(primary_key=True, max_length=10)),
],
),
migrations.CreateModel(
"Rider",
[
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
),
migrations.CreateModel(
"Stable",
[
("ponies", models.ManyToManyField("Pony")),
],
),
],
)
# State alteration.
operation = migrations.AlterField(
"Pony",
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Database alteration.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
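        # The new collation must propagate to every column referencing the PK.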
self.assertColumnCollation(f"{app_label}_pony", "id", collation)
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
self.assertColumnCollation(f"{app_label}_stable_ponies", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
def test_alter_field_pk_mti_fk(self):
app_label = "test_alflpkmtifk"
project_state = self.set_up_test_model(app_label, mti_model=True)
project_state = self.apply_operations(
app_label,
project_state,
[
migrations.CreateModel(
"ShetlandRider",
fields=[
(
"pony",
models.ForeignKey(
f"{app_label}.ShetlandPony", models.CASCADE
),
),
],
),
],
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)
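        # Helper: introspect the type code of a single column in this app's
        # tables.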
def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]
def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
mti_id_type = _get_column_id_type(cursor, "shetlandrider", "pony_id")
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, mti_id_type)
assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)
def test_alter_field_pk_mti_and_fk_to_base(self):
app_label = "test_alflpkmtiftb"
project_state = self.set_up_test_model(
app_label,
mti_model=True,
related_model=True,
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)
def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]
def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
fk_id_type = _get_column_id_type(cursor, "rider", "pony_id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, fk_id_type)
assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self):
app_label = "test_alflrsfkwtflttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="code"
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])
id_type, id_null = [
(c.type_code, c.null_ok)
for c in self.get_table_description("%s_rider" % app_label)
if c.name == "code"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in self.get_table_description("%s_pony" % app_label)
if c.name == "rider_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_null, fk_null)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_fk_with_to_field_related_name_target_type_change(
self,
):
app_label = "test_alflrsfkwtflrnttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.PositiveIntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label,
models.CASCADE,
to_field="code",
related_name="+",
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])
def test_alter_field_reloads_state_on_fk_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "id", models.CharField(primary_key=True, max_length=99)
),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
)
def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_with_to_field_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="slug"
),
),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"%s.Pony" % app_label, models.CASCADE, to_field="slug"
),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "slug", models.CharField(unique=True, max_length=99)
),
migrations.AlterField(
"Pony", "slug", models.CharField(unique=True, max_length=99)
),
],
)
def test_rename_field_reloads_state_on_fk_target_changes(self):
"""
If RenameField doesn't reload state appropriately, the AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_rename_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameField("Rider", "id", "id2"),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
atomic=connection.features.supports_atomic_references_rename,
)
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl")
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
self.assertEqual(operation.migration_name_fragment, "rename_pink_pony_blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", new_state.models["test_rnfl", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].fields)
# Rename field.
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "old_name": "pink", "new_name": "blue"},
)
def test_rename_field_unique_together(self):
project_state = self.set_up_test_model("test_rnflut", unique_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflut", new_state)
# unique_together has the renamed column.
self.assertIn(
"blue",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
self.assertNotIn(
"pink",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
# Rename field.
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflut", editor, project_state, new_state)
self.assertColumnExists("test_rnflut_pony", "blue")
self.assertColumnNotExists("test_rnflut_pony", "pink")
# The unique constraint has been ported over.
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_rnflut_pony")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflut", editor, new_state, project_state
)
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_field_index_together(self):
project_state = self.set_up_test_model("test_rnflit", index_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflit", new_state)
self.assertIn("blue", new_state.models["test_rnflit", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnflit", "pony"].fields)
# index_together has the renamed column.
self.assertIn(
"blue", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
self.assertNotIn(
"pink", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
# Rename field.
self.assertColumnExists("test_rnflit_pony", "pink")
self.assertColumnNotExists("test_rnflit_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflit", editor, project_state, new_state)
self.assertColumnExists("test_rnflit_pony", "blue")
self.assertColumnNotExists("test_rnflit_pony", "pink")
# The index constraint has been ported over.
self.assertIndexExists("test_rnflit_pony", ["weight", "blue"])
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflit", editor, new_state, project_state
)
self.assertIndexExists("test_rnflit_pony", ["weight", "pink"])
def test_rename_field_with_db_column(self):
project_state = self.apply_operations(
"test_rfwdbc",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(db_column="db_field")),
(
"fk_field",
models.ForeignKey(
"Pony",
models.CASCADE,
db_column="db_fk_field",
),
),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "renamed_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn("renamed_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertNotIn("field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "fk_field", "renamed_fk_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn(
"renamed_fk_field", new_state.models["test_rfwdbc", "pony"].fields
)
self.assertNotIn("fk_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
def test_rename_field_case(self):
project_state = self.apply_operations(
"test_rfmx",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField()),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "FiElD")
operation.state_forwards("test_rfmx", new_state)
self.assertIn("FiElD", new_state.models["test_rfmx", "pony"].fields)
self.assertColumnExists("test_rfmx_pony", "field")
with connection.schema_editor() as editor:
operation.database_forwards("test_rfmx", editor, project_state, new_state)
self.assertColumnExists(
"test_rfmx_pony",
connection.introspection.identifier_converter("FiElD"),
)
with connection.schema_editor() as editor:
operation.database_backwards("test_rfmx", editor, new_state, project_state)
self.assertColumnExists("test_rfmx_pony", "field")
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState("app", "model", []))
with self.assertRaisesMessage(
FieldDoesNotExist, "app.model has no field named 'field'"
):
migrations.RenameField("model", "field", "new_field").state_forwards(
"app", state
)
def test_rename_referenced_field_state_forward(self):
state = ProjectState()
state.add_model(
ModelState(
"app",
"Model",
[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(unique=True)),
],
)
)
state.add_model(
ModelState(
"app",
"OtherModel",
[
("id", models.AutoField(primary_key=True)),
(
"fk",
models.ForeignKey("Model", models.CASCADE, to_field="field"),
),
(
"fo",
models.ForeignObject(
"Model",
models.CASCADE,
from_fields=("fk",),
to_fields=("field",),
),
),
],
)
)
operation = migrations.RenameField("Model", "field", "renamed")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].remote_field.field_name,
"renamed",
)
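        # A plain ForeignKey keeps from_fields as ["self"] (the column on the
        # model itself); only the remote to_fields follow the rename.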
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].from_fields, ["self"]
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].to_fields, ("renamed",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields, ("fk",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
operation = migrations.RenameField("OtherModel", "fk", "renamed_fk")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"]
.fields["renamed_fk"]
.remote_field.field_name,
"renamed",
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].from_fields,
("self",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].to_fields,
("renamed",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields,
("renamed_fk",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_unique_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
project_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
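        # state_forwards() alone doesn't touch the database, so duplicate
        # rows are still allowed until database_forwards() runs.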
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alunto", editor, project_state, new_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_alunto_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alunto", editor, new_state, project_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "unique_together": {("pink", "weight")}}
)
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_pk_field(self):
app_label = "test_rutopkf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[("id", models.AutoField(primary_key=True))],
options={"unique_together": {("id",)}},
),
],
)
table_name = f"{app_label}_pony"
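        # The names below mirror the constraint names Django's schema editor
        # generates; the hash suffix is deterministic for a table and columns.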
pk_constraint_name = f"{table_name}_pkey"
unique_together_constraint_name = f"{table_name}_id_fb61f881_uniq"
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_unique_field(self):
app_label = "test_rutouf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=30, unique=True)),
],
options={"unique_together": {("name",)}},
),
],
)
table_name = f"{app_label}_pony"
unique_constraint_name = f"{table_name}_name_key"
unique_together_constraint_name = f"{table_name}_name_694f3b9f_uniq"
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
def test_add_index(self):
"""
Test the AddIndex operation.
"""
project_state = self.set_up_test_model("test_adin")
msg = (
"Indexes passed to AddIndex operations require a name argument. "
"<Index: fields=['pink']> doesn't have one."
)
with self.assertRaisesMessage(ValueError, msg):
migrations.AddIndex("Pony", models.Index(fields=["pink"]))
index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adin_pony_pink_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_adin", new_state)
# Test the database alteration
self.assertEqual(
len(new_state.models["test_adin", "pony"].options["indexes"]), 1
)
self.assertIndexNotExists("test_adin_pony", ["pink"])
with connection.schema_editor() as editor:
operation.database_forwards("test_adin", editor, project_state, new_state)
self.assertIndexExists("test_adin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adin", editor, new_state, project_state)
self.assertIndexNotExists("test_adin_pony", ["pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
def test_remove_index(self):
"""
Test the RemoveIndex operation.
"""
project_state = self.set_up_test_model("test_rmin", multicol_index=True)
self.assertTableExists("test_rmin_pony")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
operation = migrations.RemoveIndex("Pony", "pony_test_idx")
self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_rmin", new_state)
# Test the state alteration
self.assertEqual(
len(new_state.models["test_rmin", "pony"].options["indexes"]), 0
)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_rmin", editor, project_state, new_state)
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmin", editor, new_state, project_state)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pony_test_idx"})
        # Also test a field dropped with an index - SQLite table remake issue.
operations = [
migrations.RemoveIndex("Pony", "pony_test_idx"),
migrations.RemoveField("Pony", "pink"),
]
self.assertColumnExists("test_rmin_pony", "pink")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test database alteration
new_state = project_state.clone()
self.apply_operations("test_rmin", new_state, operations=operations)
self.assertColumnNotExists("test_rmin_pony", "pink")
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
self.unapply_operations("test_rmin", project_state, operations=operations)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
def test_rename_index(self):
app_label = "test_rnin"
project_state = self.set_up_test_model(app_label, index=True)
table_name = app_label + "_pony"
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_name="pony_pink_idx"
)
self.assertEqual(
operation.describe(),
"Rename index pony_pink_idx on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_pink_idx_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
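        # Backends with native RENAME INDEX support need one query; others
        # drop and recreate the index, which takes two.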
expected_queries = 1 if connection.features.can_rename_index else 2
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, "pony_pink_idx")
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reversal.
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"old_name": "pony_pink_idx",
"new_name": "new_pony_test_idx",
},
)
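# A sketch of the two forms RenameIndex accepts (values illustrative):
# old_name targets a named index, while old_fields targets the auto-named
# index created by index_together. On backends where
# connection.features.can_rename_index is False, the rename is emulated by
# dropping and recreating the index, hence the two queries asserted above.
#
#     migrations.RenameIndex(
#         model_name="pony", old_name="pony_pink_idx", new_name="new_idx"
#     )
#     migrations.RenameIndex(
#         model_name="pony", old_fields=("weight", "pink"), new_name="new_idx"
#     )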
def test_rename_index_arguments(self):
msg = "RenameIndex.old_name and old_fields are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex(
"Pony",
new_name="new_idx_name",
old_name="old_idx_name",
old_fields=("weight", "pink"),
)
msg = "RenameIndex requires one of old_name and old_fields arguments to be set."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex("Pony", new_name="new_idx_name")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_unnamed_index(self):
app_label = "test_rninui"
project_state = self.set_up_test_model(app_label, index_together=True)
table_name = app_label + "_pony"
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
self.assertEqual(
operation.describe(),
"Rename unnamed index for ('weight', 'pink') on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_weight_pink_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reverse is a no-op.
with connection.schema_editor() as editor, self.assertNumQueries(0):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reapply, RenameIndex operation is a noop when the old and new name
# match.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"new_name": "new_pony_test_idx",
"old_fields": ("weight", "pink"),
},
)
def test_rename_index_unknown_unnamed_index(self):
app_label = "test_rninuui"
project_state = self.set_up_test_model(app_label)
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
msg = "Found wrong number (0) of indexes for test_rninuui_pony(weight, pink)."
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
operation.database_forwards(app_label, editor, project_state, new_state)
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model("test_adinsf")
index = models.Index(fields=["pink"], name="test_adinsf_pony_pink_idx")
old_model = project_state.apps.get_model("test_adinsf", "Pony")
new_state = project_state.clone()
operation = migrations.AddIndex("Pony", index)
operation.state_forwards("test_adinsf", new_state)
new_model = new_state.apps.get_model("test_adinsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model("test_rminsf")
index = models.Index(fields=["pink"], name="test_rminsf_pony_pink_idx")
migrations.AddIndex("Pony", index).state_forwards("test_rminsf", project_state)
old_model = project_state.apps.get_model("test_rminsf", "Pony")
new_state = project_state.clone()
operation = migrations.RemoveIndex("Pony", "test_rminsf_pony_pink_idx")
operation.state_forwards("test_rminsf", new_state)
new_model = new_state.apps.get_model("test_rminsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_rename_index_state_forwards(self):
app_label = "test_rnidsf"
project_state = self.set_up_test_model(app_label, index=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_name="pony_pink_idx"
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_state_forwards_unnamed_index(self):
app_label = "test_rnidsfui"
project_state = self.set_up_test_model(app_label, index_together=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_fields=("weight", "pink")
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.index_together, tuple())
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@skipUnlessDBFeature("supports_expression_indexes")
def test_add_func_index(self):
app_label = "test_addfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
index = models.Index(Abs("weight"), name=index_name)
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_addfuncin_pony_abs_idx on Abs(F(weight)) on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_addfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 1)
self.assertIndexNameNotExists(table_name, index_name)
# Add index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
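# Expression (functional) indexes must be given an explicit name, since
# there is no column list to derive one from. A sketch of the equivalent
# Meta-level declaration (index name illustrative):
#
#     class Meta:
#         indexes = [models.Index(Abs("weight"), name="pony_abs_weight_idx")]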
@skipUnlessDBFeature("supports_expression_indexes")
def test_remove_func_index(self):
app_label = "test_rmfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[
models.Index(Abs("weight"), name=index_name),
],
)
self.assertTableExists(table_name)
self.assertIndexNameExists(table_name, index_name)
operation = migrations.RemoveIndex("Pony", index_name)
self.assertEqual(
operation.describe(),
"Remove index test_rmfuncin_pony_abs_idx from Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 0)
# Remove index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": index_name})
@skipUnlessDBFeature("supports_expression_indexes")
def test_alter_field_with_func_index(self):
app_label = "test_alfuncin"
index_name = f"{app_label}_pony_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[models.Index(Abs("pink"), name=index_name)],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
def test_alter_field_with_index(self):
"""
Test AlterField operation with an index to ensure indexes created via
Meta.indexes don't get dropped with sqlite3 remake.
"""
project_state = self.set_up_test_model("test_alflin", index=True)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflin", new_state)
# Test the database alteration
self.assertColumnNotNull("test_alflin_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alflin", editor, project_state, new_state)
# Index hasn't been dropped
self.assertIndexExists("test_alflin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflin", editor, new_state, project_state
)
# Ensure the index is still there
self.assertIndexExists("test_alflin_pony", ["pink"])
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter index_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_index_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(
len(
project_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
1,
)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alinto", editor, new_state, project_state
)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "index_together": {("pink", "weight")}}
)
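# index_together is deprecated (hence the RemovedInDjango51Warning filter
# on these tests); the modern equivalent is a named composite index
# declared via Meta.indexes. A sketch with an illustrative name:
#
#     class Meta:
#         indexes = [
#             models.Index(fields=["pink", "weight"], name="pony_pink_weight_idx"),
#         ]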
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter index_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together_remove_with_unique_together(self):
app_label = "test_alintoremove_wunto"
table_name = "%s_pony" % app_label
project_state = self.set_up_test_model(app_label, unique_together=True)
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
# Add index together.
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ["pink", "weight"])
# Remove index together.
project_state = new_state
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNotExists(table_name, ["pink", "weight"])
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint(self):
project_state = self.set_up_test_model("test_addconstraint")
gt_check = models.Q(pink__gt=2)
gt_constraint = models.CheckConstraint(
check=gt_check, name="test_add_constraint_pony_pink_gt_2"
)
gt_operation = migrations.AddConstraint("Pony", gt_constraint)
self.assertEqual(
gt_operation.describe(),
"Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"pony_test_add_constraint_pony_pink_gt_2",
)
# Test the state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
1,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Add another one.
lt_check = models.Q(pink__lt=100)
lt_constraint = models.CheckConstraint(
check=lt_check, name="test_add_constraint_pony_pink_lt_100"
)
lt_operation = migrations.AddConstraint("Pony", lt_constraint)
lt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
2,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 2)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_addconstraint", editor, new_state, project_state
)
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"model_name": "Pony", "constraint": gt_constraint}
)
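# A sketch of AddConstraint as it would appear in a hand-written migration
# (app and migration names illustrative):
#
#     class Migration(migrations.Migration):
#         dependencies = [("myapp", "0001_initial")]
#         operations = [
#             migrations.AddConstraint(
#                 model_name="pony",
#                 constraint=models.CheckConstraint(
#                     check=models.Q(pink__gt=2), name="pony_pink_gt_2"
#                 ),
#             ),
#         ]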
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_percent_escaping(self):
app_label = "add_constraint_string_quoting"
operations = [
migrations.CreateModel(
"Author",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("surname", models.CharField(max_length=100, default="")),
("rebate", models.CharField(max_length=100)),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
# "%" generated in startswith lookup should be escaped in a way that is
# considered a leading wildcard.
check = models.Q(name__startswith="Albert")
constraint = models.CheckConstraint(check=check, name="name_constraint")
operation = migrations.AddConstraint("Author", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Artur")
# Literal "%" should be escaped in a way that is not a considered a
# wildcard.
check = models.Q(rebate__endswith="%")
constraint = models.CheckConstraint(check=check, name="rebate_constraint")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", rebate="10$")
author = Author.objects.create(name="Albert", rebate="10%")
self.assertEqual(Author.objects.get(), author)
# Right-hand-side baked "%" literals should not be used for parameters
# interpolation.
check = ~models.Q(surname__startswith=models.F("name"))
constraint = models.CheckConstraint(check=check, name="name_constraint_rhs")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", surname="Alberto")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_or_constraint(self):
app_label = "test_addorconstraint"
constraint_name = "add_constraint_or"
from_state = self.set_up_test_model(app_label)
check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
constraint = models.CheckConstraint(check=check, name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Pony = to_state.apps.get_model(app_label, "Pony")
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=2, weight=3.0)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=3, weight=1.0)
Pony.objects.bulk_create(
[
Pony(pink=3, weight=-1.0),
Pony(pink=1, weight=-1.0),
Pony(pink=3, weight=3.0),
]
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_combinable(self):
app_label = "test_addconstraint_combinable"
operations = [
migrations.CreateModel(
"Book",
fields=[
("id", models.AutoField(primary_key=True)),
("read", models.PositiveIntegerField()),
("unread", models.PositiveIntegerField()),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
constraint = models.CheckConstraint(
check=models.Q(read=(100 - models.F("unread"))),
name="test_addconstraint_combinable_sum_100",
)
operation = migrations.AddConstraint("Book", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Book = to_state.apps.get_model(app_label, "Book")
with self.assertRaises(IntegrityError), transaction.atomic():
Book.objects.create(read=70, unread=10)
Book.objects.create(read=70, unread=30)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_remove_constraint(self):
project_state = self.set_up_test_model(
"test_removeconstraint",
constraints=[
models.CheckConstraint(
check=models.Q(pink__gt=2),
name="test_remove_constraint_pony_pink_gt_2",
),
models.CheckConstraint(
check=models.Q(pink__lt=100),
name="test_remove_constraint_pony_pink_lt_100",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_gt_2"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"remove_pony_test_remove_constraint_pony_pink_gt_2",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
1,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=1, weight=1.0).delete()
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Remove the other one.
lt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_lt_100"
)
lt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
0,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=100, weight=1.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removeconstraint", editor, new_state, project_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "name": "test_remove_constraint_pony_pink_gt_2"},
)
def test_add_partial_unique_constraint(self):
project_state = self.set_up_test_model("test_addpartialuniqueconstraint")
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
operation = migrations.AddConstraint("Pony", partial_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq "
"on model Pony",
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_addpartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_addpartialuniqueconstraint", "pony"].options[
"constraints"
]
),
1,
)
Pony = new_state.apps.get_model("test_addpartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_addpartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint works
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_addpartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": partial_unique_constraint},
)
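# Partial unique constraints are implemented as partial (filtered) unique
# indexes, so they are only enforced where
# connection.features.supports_partial_indexes is True (e.g. PostgreSQL,
# SQLite); elsewhere the operation is a database no-op, as the else branch
# above shows. Declaration sketch (name illustrative):
#
#     models.UniqueConstraint(
#         fields=["pink"],
#         condition=models.Q(weight__gt=5),
#         name="pink_uniq_for_heavy_ponies",
#     )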
def test_remove_partial_unique_constraint(self):
project_state = self.set_up_test_model(
"test_removepartialuniqueconstraint",
constraints=[
models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_constraint_pony_pink_for_weight_gt_5_uniq"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from "
"model Pony",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removepartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removepartialuniqueconstraint", "pony"].options[
"constraints"
]
),
0,
)
Pony = new_state.apps.get_model("test_removepartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removepartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
Pony.objects.create(pink=1, weight=7.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removepartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint works
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "test_constraint_pony_pink_for_weight_gt_5_uniq",
},
)
def test_add_deferred_unique_constraint(self):
app_label = "test_adddeferred_uc"
project_state = self.set_up_test_model(app_label)
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_add",
deferrable=models.Deferrable.DEFERRED,
)
operation = migrations.AddConstraint("Pony", deferred_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint deferred_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": deferred_unique_constraint},
)
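# Deferred unique constraints are checked at transaction commit rather
# than per statement, which is why the transient duplicate above (two rows
# with pink=1) is allowed as long as obj.pink changes before the atomic
# block exits. "SET CONSTRAINTS <name> IMMEDIATE" restores per-statement
# checking, producing the IntegrityError. Declaration sketch:
#
#     models.UniqueConstraint(
#         fields=["pink"],
#         name="deferred_pink_uniq",
#         deferrable=models.Deferrable.DEFERRED,
#     )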
def test_remove_deferred_unique_constraint(self):
app_label = "test_removedeferred_uc"
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_rm",
deferrable=models.Deferrable.DEFERRED,
)
project_state = self.set_up_test_model(
app_label, constraints=[deferred_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", deferred_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint deferred_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_deferrable_unique_constraints:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "deferred_pink_constraint_rm",
},
)
def test_add_covering_unique_constraint(self):
app_label = "test_addcovering_uc"
project_state = self.set_up_test_model(app_label)
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_add",
include=["weight"],
)
operation = migrations.AddConstraint("Pony", covering_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint covering_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": covering_unique_constraint},
)
def test_remove_covering_unique_constraint(self):
app_label = "test_removecovering_uc"
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_rm",
include=["weight"],
)
project_state = self.set_up_test_model(
app_label, constraints=[covering_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", covering_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint covering_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_covering_indexes:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "covering_pink_constraint_rm",
},
)
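# Covering unique constraints (include=[...]) store extra non-key columns
# in the unique index so index-only scans can satisfy more queries; they
# require connection.features.supports_covering_indexes (e.g. PostgreSQL).
# Declaration sketch (name illustrative):
#
#     models.UniqueConstraint(
#         fields=["pink"], include=["weight"], name="covering_pink_uniq"
#     )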
def test_alter_field_with_func_unique_constraint(self):
app_label = "test_alfuncuc"
constraint_name = f"{app_label}_pony_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint("pink", "weight", name=constraint_name)
],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
def test_add_func_unique_constraint(self):
app_label = "test_adfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
constraint = models.UniqueConstraint(Abs("weight"), name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_adfuncuc_pony_abs_uq on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
self.assertIndexNameNotExists(table_name, constraint_name)
# Add constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = new_state.apps.get_model(app_label, "Pony")
Pony.objects.create(weight=4.0)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": constraint},
)
def test_remove_func_unique_constraint(self):
app_label = "test_rmfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint(Abs("weight"), name=constraint_name),
],
)
self.assertTableExists(table_name)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
operation = migrations.RemoveConstraint("Pony", constraint_name)
self.assertEqual(
operation.describe(),
"Remove constraint test_rmfuncuc_pony_abs_uq from model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Remove constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=-4.0).delete()
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": constraint_name})
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions(
"Pony", {"permissions": [("can_groom", "Can groom")]}
)
self.assertEqual(operation.describe(), "Change Meta options on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_options")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
0,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
1,
)
self.assertEqual(
new_state.models["test_almoop", "pony"].options["permissions"][0][0],
"can_groom",
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"name": "Pony", "options": {"permissions": [("can_groom", "Can groom")]}},
)
def test_alter_model_options_emptying(self):
"""
The AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
1,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
0,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "options": {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(
operation.describe(), "Set order_with_respect_to on Rider to pony"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_rider_order_with_respect_to",
)
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertIsNone(
project_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
)
)
self.assertEqual(
new_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
),
"pony",
)
# Make sure there's no _order column yet
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(
weight=50
)
rider1 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider1.friend = rider1
rider1.save()
rider2 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider2.friend = rider2
rider2.save()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alorwrtto", editor, project_state, new_state
)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
updated_riders = new_state.apps.get_model(
"test_alorwrtto", "Rider"
).objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alorwrtto", editor, new_state, project_state
)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Rider", "order_with_respect_to": "pony"}
)
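# order_with_respect_to works by adding a hidden integer "_order" column
# to the table; rows that exist when the column is added default to
# _order=0, which is what the assertions above verify. Meta sketch:
#
#     class Meta:
#         order_with_respect_to = "pony"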
def test_alter_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
self.assertEqual(operation.describe(), "Change managers on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_managers")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
rendered_state = new_state.apps
model = rendered_state.get_model("test_almoma", "pony")
self.assertIsInstance(model.food_qs, models.Manager)
self.assertIsInstance(model.food_mgr, FoodManager)
self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
def test_alter_model_managers_emptying(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
def test_alter_fk(self):
"""
Creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.ForeignKey("Pony", models.CASCADE, editable=False),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards(
"test_alfk", editor, project_state, create_state
)
alter_operation.database_forwards(
"test_alfk", editor, create_state, alter_state
)
def test_alter_fk_non_fk(self):
"""
Altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state(
"test_afknfk", operation, related_model=True
)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_afknfk", editor, new_state, project_state
)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on
# SQLite and MySQL respectively.
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' "
"WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' "
"WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[
migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(
len(new_state.models["test_runsql", "somethingelse"].fields), 1
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards(
"test_runsql", editor, project_state, new_state
)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunSQL("SELECT 1 FROM void;", elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
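# A sketch of RunSQL in a hand-written migration (names illustrative):
# reverse_sql makes the operation reversible, and state_operations tells
# the migration framework what the raw SQL did to the schema:
#
#     migrations.RunSQL(
#         sql="CREATE TABLE i_love_ponies (id int);",
#         reverse_sql="DROP TABLE i_love_ponies;",
#         state_operations=[
#             migrations.CreateModel(
#                 "SomethingElse", [("id", models.AutoField(primary_key=True))]
#             ),
#         ],
#     )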
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
[
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);",
["Ponies"],
],
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);",
(
3,
"Python",
),
),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
(
"DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;",
[3, "Python"],
),
],
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
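# Each forwards/backwards entry accepted by RunSQL is either a plain SQL
# string or a 2-element sequence of (sql, params), where params may be a
# list, a tuple, or None, as exercised above. Sketch (illustrative):
#
#     migrations.RunSQL(
#         [
#             "INSERT INTO t (x) VALUES ('a');",
#             ("INSERT INTO t (x) VALUES (%s);", ["b"]),
#         ],
#         ["DELETE FROM t;"],
#     )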
def test_run_sql_params_invalid(self):
"""
#23426 - RunSQL should fail when a list of statements with an incorrect
number of tuples is given.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[["INSERT INTO foo (bar) VALUES ('buz');"]],
# backwards
(("DELETE FROM foo WHERE bar = 'buz';", "invalid", "parameter count"),),
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_sql_add_missing_semicolon_on_collect_sql(self):
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
tests = [
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1);\n",
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1)\n",
]
for sql in tests:
with self.subTest(sql=sql):
operation = migrations.RunSQL(sql, migrations.RunPython.noop)
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
collected_sql = "\n".join(editor.collected_sql)
self.assertEqual(collected_sql.count(";"), 1)
def test_run_python(self):
"""
Tests the RunPython operation.
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(
inner_method, reverse_code=inner_method_reverse
)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
# Now test we can't use a string
with self.assertRaisesMessage(
ValueError, "RunPython must be supplied with a callable"
):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but
# without reverse_code set.
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6
)
self.assertEqual(
project_state.apps.get_model(
"test_runpython", "ShetlandPony"
).objects.count(),
2,
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunPython(inner_method, elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
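# The canonical RunPython pattern, sketched with illustrative app/model
# names: the forwards and reverse callables receive the historical app
# registry and the schema editor, and must fetch models through
# apps.get_model() rather than importing them directly:
#
#     def forwards(apps, schema_editor):
#         Pony = apps.get_model("myapp", "Pony")
#         Pony.objects.create(pink=1, weight=3.55)
#
#     def backwards(apps, schema_editor):
#         apps.get_model("myapp", "Pony").objects.filter(pink=1).delete()
#
#     operations = [migrations.RunPython(forwards, reverse_code=backwards)]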
def test_run_python_atomic(self):
"""
Tests that the RunPython operation correctly handles the "atomic" keyword.
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
# Verify atomicity when applying.
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method)
]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)
]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Reset object count to zero and verify atomicity when unapplying.
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.all().delete()
# On a fully-transactional database, both versions rollback.
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation leaves a row there.
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Verify deconstruction.
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
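# By way of illustration, a hedged sketch of opting a long-running data
# migration out of per-operation atomicity ("backfill" is an illustrative
# name, not part of this test suite):
#
#     operations = [
#         migrations.RunPython(
#             backfill,
#             reverse_code=migrations.RunPython.noop,
#             # Don't wrap this operation in its own transaction.
#             atomic=False,
#         ),
#     ]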
def test_run_python_related_assignment(self):
"""
#24282 - Model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author", models.CASCADE)),
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards(
"test_books", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards(
"test_books", editor, project_state, new_state
)
def test_model_with_bigautofield(self):
"""
A model with BigAutoField can be created.
"""
def create_data(models, schema_editor):
Author = models.get_model("test_author", "Author")
Book = models.get_model("test_book", "Book")
author1 = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author1)
Book.objects.create(id=2**33, title="A farewell to arms", author=author1)
author2 = Author.objects.create(id=2**33, name="Remarque")
Book.objects.create(title="All quiet on the western front", author=author2)
Book.objects.create(title="Arc de Triomphe", author=author2)
create_author = migrations.CreateModel(
"Author",
[
("id", models.BigAutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.BigAutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
(
"author",
models.ForeignKey(
to="test_author.Author", on_delete=models.CASCADE
),
),
],
options={},
)
fill_data = migrations.RunPython(create_data)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_author", new_state)
create_author.database_forwards(
"test_author", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_book", new_state)
create_book.database_forwards("test_book", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_data.state_forwards("fill_data", new_state)
fill_data.database_forwards("fill_data", editor, project_state, new_state)
def _test_autofield_foreignfield_growth(
self, source_field, target_field, target_value
):
"""
A field may be migrated in the following ways:
- AutoField to BigAutoField
- SmallAutoField to AutoField
- SmallAutoField to BigAutoField
"""
def create_initial_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog = Blog.objects.create(name="web development done right")
Article.objects.create(name="Frameworks", blog=blog)
Article.objects.create(name="Programming Languages", blog=blog)
def create_big_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog2 = Blog.objects.create(name="Frameworks", id=target_value)
Article.objects.create(name="Django", blog=blog2)
Article.objects.create(id=target_value, name="Django2", blog=blog2)
create_blog = migrations.CreateModel(
"Blog",
[
("id", source_field(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_article = migrations.CreateModel(
"Article",
[
("id", source_field(primary_key=True)),
(
"blog",
models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE),
),
("name", models.CharField(max_length=100)),
("data", models.TextField(default="")),
],
options={},
)
fill_initial_data = migrations.RunPython(
create_initial_data, create_initial_data
)
fill_big_data = migrations.RunPython(create_big_data, create_big_data)
grow_article_id = migrations.AlterField(
"Article", "id", target_field(primary_key=True)
)
grow_blog_id = migrations.AlterField(
"Blog", "id", target_field(primary_key=True)
)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_blog.state_forwards("test_blog", new_state)
create_blog.database_forwards("test_blog", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_article.state_forwards("test_article", new_state)
create_article.database_forwards(
"test_article", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_initial_data.state_forwards("fill_initial_data", new_state)
fill_initial_data.database_forwards(
"fill_initial_data", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_article_id.state_forwards("test_article", new_state)
grow_article_id.database_forwards(
"test_article", editor, project_state, new_state
)
state = new_state.clone()
article = state.apps.get_model("test_article.Article")
self.assertIsInstance(article._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_blog_id.state_forwards("test_blog", new_state)
grow_blog_id.database_forwards(
"test_blog", editor, project_state, new_state
)
state = new_state.clone()
blog = state.apps.get_model("test_blog.Blog")
self.assertIsInstance(blog._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_big_data.state_forwards("fill_big_data", new_state)
fill_big_data.database_forwards(
"fill_big_data", editor, project_state, new_state
)
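# For context, the migration-file equivalent of the growth exercised above is
# a pair of AlterField operations (model names are illustrative assumptions);
# the schema editor follows the primary key change through referencing FKs:
#
#     operations = [
#         migrations.AlterField("blog", "id", models.BigAutoField(primary_key=True)),
#         migrations.AlterField("article", "id", models.BigAutoField(primary_key=True)),
#     ]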
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
2**33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
2**22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
2**33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(
migrations.RunPython.noop, migrations.RunPython.noop
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
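# For context, RunPython.noop is commonly used as a reverse_code placeholder
# so a one-way data load can still be unapplied (a sketch; "load_data" is an
# illustrative name):
#
#     migrations.RunPython(load_data, reverse_code=migrations.RunPython.noop)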
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;",
)
state_operation = migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation], database_operations=[database_operation]
)
self.assertEqual(
operation.describe(), "Custom state/database change combination"
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(
len(
new_state.models[
"test_separatedatabaseandstate", "somethingelse"
].fields
),
1,
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_separatedatabaseandstate", editor, project_state, new_state
)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_separatedatabaseandstate", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["database_operations", "state_operations"]
)
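# For context, a common real-world use of SeparateDatabaseAndState is adopting
# a table that already exists: record the model in migration state while
# issuing no DDL (a sketch; model and table names are illustrative):
#
#     operations = [
#         migrations.SeparateDatabaseAndState(
#             state_operations=[
#                 migrations.CreateModel(
#                     "LegacyThing",
#                     [("id", models.AutoField(primary_key=True))],
#                     options={"db_table": "legacy_thing"},
#                 ),
#             ],
#             database_operations=[],  # the table is already there
#         ),
#     ]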
def test_separate_database_and_state2(self):
"""
A complex SeparateDatabaseAndState operation: Multiple operations both
for state and database. Verify the state dependencies within each list
and that state ops don't affect the database.
"""
app_label = "test_separatedatabaseandstate2"
project_state = self.set_up_test_model(app_label)
# Create the operation
database_operations = [
migrations.CreateModel(
"ILovePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveponies"},
),
migrations.CreateModel(
"ILoveMorePonies",
# We use IntegerField and not AutoField because
# the model is going to be deleted immediately
# and with an AutoField this fails on Oracle
[("id", models.IntegerField(primary_key=True))],
options={"db_table": "ilovemoreponies"},
),
migrations.DeleteModel("ILoveMorePonies"),
migrations.CreateModel(
"ILoveEvenMorePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveevenmoreponies"},
),
]
state_operations = [
migrations.CreateModel(
"SomethingElse",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingelse"},
),
migrations.DeleteModel("SomethingElse"),
migrations.CreateModel(
"SomethingCompletelyDifferent",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingcompletelydifferent"},
),
]
operation = migrations.SeparateDatabaseAndState(
state_operations=state_operations,
database_operations=database_operations,
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
def assertModelsAndTables(after_db):
# Tables and models exist, or don't, as they should:
self.assertNotIn((app_label, "somethingelse"), new_state.models)
self.assertEqual(
len(new_state.models[app_label, "somethingcompletelydifferent"].fields),
1,
)
self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
self.assertTableNotExists("somethingelse")
self.assertTableNotExists("somethingcompletelydifferent")
self.assertTableNotExists("ilovemoreponies")
if after_db:
self.assertTableExists("iloveponies")
self.assertTableExists("iloveevenmoreponies")
else:
self.assertTableNotExists("iloveponies")
self.assertTableNotExists("iloveevenmoreponies")
assertModelsAndTables(after_db=False)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertModelsAndTables(after_db=True)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertModelsAndTables(after_db=False)
class SwappableOperationTests(OperationTestBase):
"""
Key operations ignore swappable models
(we don't want to replicate all of them here, as the functionality
is in a common base class anyway)
"""
available_apps = ["migrations"]
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
The CreateModel operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crigsw", editor, new_state, project_state
)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
The DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
The AddField operation ignores swapped models.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_adfligsw", editor, project_state, new_state
)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adfligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_adfligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_indexes_ignore_swapped(self):
"""
Add/RemoveIndex operations ignore swapped models.
"""
operation = migrations.AddIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_adinigsw", operation)
with connection.schema_editor() as editor:
# No database queries should be run for swapped models
operation.database_forwards(
"test_adinigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_adinigsw", editor, new_state, project_state
)
operation = migrations.RemoveIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_rminigsw", operation)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_rminigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_rminigsw", editor, new_state, project_state
)
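# For context, the "swappable" hook exercised by the tests above: a model
# names a setting in Meta.swappable, and when that setting points at a
# different model, schema operations for the original are skipped (a sketch
# using the TEST_SWAP_MODEL setting from these tests):
#
#     class Pony(models.Model):
#         class Meta:
#             swappable = "TEST_SWAP_MODEL"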
class TestCreateModel(SimpleTestCase):
def test_references_model_mixin(self):
migrations.CreateModel(
"name",
fields=[],
bases=(Mixin, models.Model),
).references_model("other_model", "migrations")
class FieldOperationTests(SimpleTestCase):
def test_references_model(self):
operation = FieldOperation(
"MoDel", "field", models.ForeignKey("Other", models.CASCADE)
)
# Model name match.
self.assertIs(operation.references_model("mOdEl", "migrations"), True)
# Referenced field.
self.assertIs(operation.references_model("oTher", "migrations"), True)
# Doesn't reference.
self.assertIs(operation.references_model("Whatever", "migrations"), False)
def test_references_field_by_name(self):
operation = FieldOperation("MoDel", "field", models.BooleanField(default=False))
self.assertIs(operation.references_field("model", "field", "migrations"), True)
def test_references_field_by_remote_field_model(self):
operation = FieldOperation(
"Model", "field", models.ForeignKey("Other", models.CASCADE)
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_from_fields(self):
operation = FieldOperation(
"Model",
"field",
models.fields.related.ForeignObject(
"Other", models.CASCADE, ["from"], ["to"]
),
)
self.assertIs(operation.references_field("Model", "from", "migrations"), True)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
self.assertIs(operation.references_field("Other", "from", "migrations"), False)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
def test_references_field_by_to_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ForeignKey("Other", models.CASCADE, to_field="field"),
)
self.assertIs(operation.references_field("Other", "field", "migrations"), True)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_through(self):
operation = FieldOperation(
"Model", "field", models.ManyToManyField("Other", through="Through")
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_reference_field_by_through_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ManyToManyField(
"Other", through="Through", through_fields=("first", "second")
),
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Through", "first", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "second", "migrations"), True
)
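# For context, the model layout implied by through_fields=("first", "second")
# in the test above (a sketch; names mirror the test's assumptions): the first
# name is the FK back to the declaring model, the second the FK to the target.
#
#     class Through(models.Model):
#         first = models.ForeignKey("Model", models.CASCADE)
#         second = models.ForeignKey("Other", models.CASCADE)
#
#     class Model(models.Model):
#         field = models.ManyToManyField(
#             "Other", through="Through", through_fields=("first", "second")
#         )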
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.test import override_settings
from django.urls import reverse
@override_settings(ROOT_URLCONF="admin_views.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
def test_use_skip_link_to_content(self):
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
# `Skip link` is rendered but not displayed.
skip_link = self.selenium.find_element(By.CLASS_NAME, "skip-to-content-link")
self.assertFalse(skip_link.is_displayed())
# 1st TAB is pressed, `skip link` is shown.
body = self.selenium.find_element(By.TAG_NAME, "body")
body.send_keys(Keys.TAB)
self.assertTrue(skip_link.is_displayed())
# Press RETURN to skip the navbar links (view site / documentation /
# change password / log out) and focus the first model in the admin_views list.
skip_link.send_keys(Keys.RETURN)
self.assertFalse(skip_link.is_displayed())  # `skip link` disappears.
keys = [Keys.TAB, Keys.TAB]  # The 1st TAB focuses the section title.
if self.browser == "firefox":
# For some reason Firefox doesn't focus the section title ('ADMIN_VIEWS').
keys.remove(Keys.TAB)
body.send_keys(keys)
actors_a_tag = self.selenium.find_element(By.LINK_TEXT, "Actors")
self.assertEqual(self.selenium.switch_to.active_element, actors_a_tag)
# Go to Actors changelist, skip sidebar and focus "Add actor +".
with self.wait_page_loaded():
actors_a_tag.send_keys(Keys.RETURN)
body = self.selenium.find_element(By.TAG_NAME, "body")
body.send_keys(Keys.TAB)
skip_link = self.selenium.find_element(By.CLASS_NAME, "skip-to-content-link")
self.assertTrue(skip_link.is_displayed())
ActionChains(self.selenium).send_keys(Keys.RETURN, Keys.TAB).perform()
actors_add_url = reverse("admin:admin_views_actor_add")
actors_a_tag = self.selenium.find_element(
By.CSS_SELECTOR, f"#content [href='{actors_add_url}']"
)
self.assertEqual(self.selenium.switch_to.active_element, actors_a_tag)
# Go to the Actor form and the first input will be focused automatically.
with self.wait_page_loaded():
actors_a_tag.send_keys(Keys.RETURN)
first_input = self.selenium.find_element(By.ID, "id_name")
self.assertEqual(self.selenium.switch_to.active_element, first_input)
def test_dont_use_skip_link_to_content(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
# `Skip link` is rendered but not displayed.
skip_link = self.selenium.find_element(By.CLASS_NAME, "skip-to-content-link")
self.assertFalse(skip_link.is_displayed())
# 1st TAB is pressed, `skip link` is shown.
body = self.selenium.find_element(By.TAG_NAME, "body")
body.send_keys(Keys.TAB)
self.assertTrue(skip_link.is_displayed())
# The 2nd TAB will focus the page title.
body.send_keys(Keys.TAB)
django_administration_title = self.selenium.find_element(
By.LINK_TEXT, "Django administration"
)
self.assertFalse(skip_link.is_displayed())  # `skip link` disappears.
self.assertEqual(
self.selenium.switch_to.active_element, django_administration_title
)
def test_skip_link_is_skipped_when_there_is_searchbar(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
group_a_tag = self.selenium.find_element(By.LINK_TEXT, "Groups")
with self.wait_page_loaded():
group_a_tag.click()
# `Skip link` is rendered but not displayed.
skip_link = self.selenium.find_element(By.CLASS_NAME, "skip-to-content-link")
self.assertFalse(skip_link.is_displayed())
# `Searchbar` has autofocus.
searchbar = self.selenium.find_element(By.ID, "searchbar")
self.assertEqual(self.selenium.switch_to.active_element, searchbar)
def test_skip_link_with_RTL_language_doesnt_create_horizontal_scrolling(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
with override_settings(LANGUAGE_CODE="ar"):
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
skip_link = self.selenium.find_element(
By.CLASS_NAME, "skip-to-content-link"
)
body = self.selenium.find_element(By.TAG_NAME, "body")
body.send_keys(Keys.TAB)
self.assertTrue(skip_link.is_displayed())
is_vertical_scrollable = self.selenium.execute_script(
"return arguments[0].scrollHeight > arguments[0].offsetHeight;", body
)
is_horizontal_scrollable = self.selenium.execute_script(
"return arguments[0].scrollWidth > arguments[0].offsetWidth;", body
)
self.assertTrue(is_vertical_scrollable)
self.assertFalse(is_horizontal_scrollable)
import datetime
import os
import re
import unittest
from unittest import mock
from urllib.parse import parse_qsl, urljoin, urlparse
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib import admin
from django.contrib.admin import AdminSite, ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import ADDITION, DELETION, LogEntry
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME, get_permission_codename
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.db import connection
from django.forms.utils import ErrorList
from django.template.response import TemplateResponse
from django.test import (
TestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import override_script_prefix
from django.urls import NoReverseMatch, resolve, reverse
from django.utils import formats, translation
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from . import customadmin
from .admin import CityAdmin, site, site2
from .models import (
Actor,
AdminOrderedAdminMethod,
AdminOrderedCallable,
AdminOrderedField,
AdminOrderedModelMethod,
Album,
Answer,
Answer2,
Article,
BarAccount,
Book,
Bookmark,
Box,
Category,
Chapter,
ChapterXtra1,
ChapterXtra2,
Character,
Child,
Choice,
City,
Collector,
Color,
ComplexSortedPerson,
CoverLetter,
CustomArticle,
CyclicOne,
CyclicTwo,
DooHickey,
Employee,
EmptyModel,
Fabric,
FancyDoodad,
FieldOverridePost,
FilteredManager,
FooAccount,
FoodDelivery,
FunkyTag,
Gallery,
Grommet,
Inquisition,
Language,
Link,
MainPrepopulated,
Media,
ModelWithStringPrimaryKey,
OtherStory,
Paper,
Parent,
ParentWithDependentChildren,
ParentWithUUIDPK,
Person,
Persona,
Picture,
Pizza,
Plot,
PlotDetails,
PluggableSearchPerson,
Podcast,
Post,
PrePopulatedPost,
Promo,
Question,
ReadablePizza,
ReadOnlyPizza,
ReadOnlyRelatedField,
Recommendation,
Recommender,
RelatedPrepopulated,
RelatedWithUUIDPKModel,
Report,
Restaurant,
RowLevelChangePermissionModel,
SecretHideout,
Section,
ShortMessage,
Simple,
Song,
State,
Story,
SuperSecretHideout,
SuperVillain,
Telegram,
TitleTranslation,
Topping,
Traveler,
UnchangeableObject,
UndeletableObject,
UnorderedObject,
UserProxy,
Villain,
Vodcast,
Whatsit,
Widget,
Worker,
WorkHour,
)
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
MULTIPART_ENCTYPE = 'enctype="multipart/form-data"'
def make_aware_datetimes(dt, iana_key):
"""Makes one aware datetime for each supported time zone provider."""
yield dt.replace(tzinfo=zoneinfo.ZoneInfo(iana_key))
if pytz is not None:
yield pytz.timezone(iana_key).localize(dt, is_dst=None)
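# A usage sketch (the naive datetime and IANA key are illustrative): callers
# iterate the generator so each assertion runs once per installed provider.
#
#     for aware_dt in make_aware_datetimes(datetime.datetime(2021, 8, 1), "Asia/Kolkata"):
#         ...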
class AdminFieldExtractionMixin:
"""
Helper methods for extracting data from AdminForm.
"""
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
fields = []
for fieldset in response.context["adminform"]:
for field_line in fieldset:
fields.extend(field_line)
return fields
def get_admin_readonly_fields(self, response):
"""
Return the readonly fields for the response's AdminForm.
"""
return [f for f in self.get_admin_form_fields(response) if f.is_readonly]
def get_admin_readonly_field(self, response, field_name):
"""
Return the readonly field for the given field_name.
"""
admin_readonly_fields = self.get_admin_readonly_fields(response)
for field in admin_readonly_fields:
if field.field["name"] == field_name:
return field
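# A usage sketch for this mixin (the URL name and field name below are
# illustrative assumptions): fetch a change-view response, then inspect a
# single readonly field's rendering.
#
#     response = self.client.get(
#         reverse("admin:admin_views_article_change", args=(article.pk,))
#     )
#     readonly_field = self.get_admin_readonly_field(response, "title")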
@override_settings(ROOT_URLCONF="admin_views.urls", USE_I18N=True, LANGUAGE_CODE="en")
class AdminViewBasicTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 1",
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 2",
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.color1 = Color.objects.create(value="Red", warm=True)
cls.color2 = Color.objects.create(value="Orange", warm=True)
cls.color3 = Color.objects.create(value="Blue", warm=False)
cls.color4 = Color.objects.create(value="Green", warm=False)
cls.fab1 = Fabric.objects.create(surface="x")
cls.fab2 = Fabric.objects.create(surface="y")
cls.fab3 = Fabric.objects.create(surface="plain")
cls.b1 = Book.objects.create(name="Book 1")
cls.b2 = Book.objects.create(name="Book 2")
cls.pro1 = Promo.objects.create(name="Promo 1", book=cls.b1)
cls.pro1 = Promo.objects.create(name="Promo 2", book=cls.b2)
cls.chap1 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b1
)
cls.chap3 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b2
)
cls.chap4 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b2
)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra="ChapterXtra1 1")
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra="ChapterXtra1 2")
Actor.objects.create(name="Palin", age=27)
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
# There is no title in the database; give one here or the formset will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
def setUp(self):
self.client.force_login(self.superuser)
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(
response.content.index(text1.encode()),
response.content.index(text2.encode()),
(failing_msg or "")
+ "\nResponse:\n"
+ response.content.decode(response.charset),
)
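# A usage sketch (the sort parameter and strings mirror the tests below):
#
#     response = self.client.get(
#         reverse("admin:admin_views_article_changelist"), {"o": 2}
#     )
#     self.assertContentBefore(response, "Oldest content", "Middle content")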
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
If you leave off the trailing slash, app should redirect and add it.
"""
add_url = reverse("admin:admin_views_article_add")
response = self.client.get(add_url[:-1])
self.assertRedirects(response, add_url, status_code=301)
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get(
reverse("admin:admin_views_section_add"), {"name": "My Section"}
)
self.assertContains(
response,
'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response",
)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
GET on the change_view (when passing a string as the PK argument for a
model with an integer PK field) redirects to the index page with a
message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(quote("abc/<b>"),)),
follow=True,
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["section with ID “abc/<b>” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_edit_GET_old_url_redirect(self):
"""
The change URL changed in Django 1.9, but the old one still redirects.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)).replace(
"change/", ""
)
)
self.assertRedirects(
response, reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
def test_basic_inheritance_GET_string_PK(self):
"""
GET on the change_view (for inherited models) redirects to the index
page with a message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_supervillain_change", args=("abc",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["super villain with ID “abc” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_section_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""HTTP response from a popup is properly escaped."""
post_data = {
IS_POPUP_VAR: "1",
"title": "title with a new\nline",
"content": "some content",
"date_0": "2010-09-10",
"date_1": "14:55:39",
}
response = self.client.post(reverse("admin:admin_views_article_add"), post_data)
self.assertContains(response, "title with a new\\nline")
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
url = reverse("admin:admin_views_section_change", args=(self.s1.pk,))
response = self.client.post(url, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as_delete_inline(self):
"""
Should be able to "Save as new" while also deleting an inline.
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-2-DELETE": "1",
"article_set-3-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302)
# started with 3 articles, one was deleted.
self.assertEqual(Section.objects.latest("id").article_set.count(), 2)
def test_change_list_column_field_classes(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
# callables display the callable name.
self.assertContains(response, "column-callable_year")
self.assertContains(response, "field-callable_year")
# lambdas display as "lambda" + index that they appear in list_display.
self.assertContains(response, "column-lambda8")
self.assertContains(response, "field-lambda8")
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_property(self):
"""
Sort on a list_display field that is a property (column 10 is
a property in Article model).
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 10}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on property are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on property are out of order.",
)
def test_change_list_sorting_callable_query_expression(self):
"""Query expressions may be used for admin_order_field."""
tests = [
("order_by_expression", 9),
("order_by_f_expression", 12),
("order_by_orderby_expression", 13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_callable_query_expression_reverse(self):
tests = [
("order_by_expression", -9),
("order_by_f_expression", -12),
("order_by_orderby_expression", -13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-3"}
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on Model method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on Model method are out of order.",
)
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "4"}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
td = '<td class="field-model_property_year">%s</td>'
td_2000, td_2008, td_2009 = td % 2000, td % 2008, td % 2009
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "6"}
)
self.assertContentBefore(
response,
td_2009,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2000,
"Results of sorting on ModelAdmin method are out of order.",
)
# Let's make sure the ordering is right and that we don't get a
# FieldError when we change to descending order
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-6"}
)
self.assertContentBefore(
response,
td_2000,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2009,
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
# Sort by name, gender
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "1.2"}
)
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "-2.1"}
)
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
response = self.client.get(reverse("admin:admin_views_person_changelist"), {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso="ur", name="Urdu")
l2 = Language.objects.create(iso="ar", name="Arabic")
link1 = reverse("admin:admin_views_language_change", args=(quote(l1.pk),))
link2 = reverse("admin:admin_views_language_change", args=(quote(l2.pk),))
response = self.client.get(reverse("admin:admin_views_language_changelist"), {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get(
reverse("admin:admin_views_language_changelist"), {"o": "-1"}
)
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_podcast_change", args=(p2.pk,))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# The changelist displays the correct columns if two columns correspond
# to the same ordering field.
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(quote(p1.pk),))
link2 = reverse("admin:admin_views_podcast_change", args=(quote(p2.pk),))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse("admin:admin_views_complexsortedperson_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_complexsortedperson_change", args=(p2.pk,))
response = self.client.get(
reverse("admin:admin_views_complexsortedperson_changelist"), {}
)
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, "Name")
self.assertContains(response, "Colored name")
# Check order
self.assertContentBefore(response, "Name", "Colored name")
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
The admin shows default sort indicators for all kinds of 'ordering'
fields: field names, methods on the model admin and the model itself, and
other callables. See #17252.
"""
models = [
(AdminOrderedField, "adminorderedfield"),
(AdminOrderedModelMethod, "adminorderedmodelmethod"),
(AdminOrderedAdminMethod, "adminorderedadminmethod"),
(AdminOrderedCallable, "adminorderedcallable"),
]
for model, url in models:
model.objects.create(stuff="The Last Item", order=3)
model.objects.create(stuff="The First Item", order=1)
model.objects.create(stuff="The Middle Item", order=2)
response = self.client.get(
reverse("admin:admin_views_%s_changelist" % url), {}
)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(
response.context["cl"].get_ordering_field_columns(), {2: "asc"}
)
# Check order of records.
self.assertContentBefore(response, "The First Item", "The Middle Item")
self.assertContentBefore(response, "The Middle Item", "The Last Item")
def test_has_related_field_in_list_display_fk(self):
"""Joins shouldn't be performed for <FK>_id fields in list display."""
state = State.objects.create(name="Karnataka")
City.objects.create(state=state, name="Bangalore")
response = self.client.get(reverse("admin:admin_views_city_changelist"), {})
response.context["cl"].list_display = ["id", "name", "state"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["id", "name", "state_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_has_related_field_in_list_display_o2o(self):
"""Joins shouldn't be performed for <O2O>_id fields in list display."""
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["media_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_limited_filter(self):
"""
Admin changelist filters do not contain objects excluded via
limit_choices_to.
"""
response = self.client.get(reverse("admin:admin_views_thing_changelist"))
self.assertContains(
response,
'<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view",
)
self.assertNotContains(
response,
'<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to",
)
def test_relation_spanning_filters(self):
changelist_url = reverse("admin:admin_views_chapterxtra1_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, '<div id="changelist-filter">')
filters = {
"chap__id__exact": {
"values": [c.id for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.id == value,
},
"chap__title": {
"values": [c.title for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.title == value,
},
"chap__book__id__exact": {
"values": [b.id for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.id == value,
},
"chap__book__name": {
"values": [b.name for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.name == value,
},
"chap__book__promo__id__exact": {
"values": [p.id for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
id=value
).exists(),
},
"chap__book__promo__name": {
"values": [p.name for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
name=value
).exists(),
},
# A forward relation (book) after a reverse relation (promo).
"guest_author__promo__book__id__exact": {
"values": [p.id for p in Book.objects.all()],
"test": lambda obj, value: obj.guest_author.promo_set.filter(
book=value
).exists(),
},
}
for filter_path, params in filters.items():
for value in params["values"]:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s"' % query_string)
# ensure link works
filtered_response = self.client.get(
"%s?%s" % (changelist_url, query_string)
)
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context["cl"].queryset.all():
self.assertTrue(params["test"](obj, value))
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
changelist_url = reverse("admin:admin_views_thing_changelist")
response = self.client.get(changelist_url, {"notarealfield": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get(changelist_url, {"notarealfield__whatever": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
response = self.client.get(
changelist_url, {"color__id__exact": "StringNotInteger!"}
)
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Regression test for #18530
response = self.client.get(changelist_url, {"pub_date__gte": "foo"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(
title="I Could Go Anywhere",
content="Versatile",
date=datetime.datetime.now(),
)
changelist_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, "4 articles")
response = self.client.get(changelist_url, {"section__isnull": "false"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "0"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "true"})
self.assertContains(response, "1 article")
response = self.client.get(changelist_url, {"section__isnull": "1"})
self.assertContains(response, "1 article")
def test_logout_and_password_change_URLs(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(
response,
'<form id="logout-form" method="post" action="%s">'
% reverse("admin:logout"),
)
self.assertContains(
response, '<a href="%s">' % reverse("admin:password_change")
)
def test_named_group_field_choices_change_list(self):
"""
Ensures the admin changelist shows correct values in the relevant column
for rows corresponding to instances of a model in which a named group
has been used in the choices option of a field.
"""
link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,))
link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,))
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist table isn't showing the right human-readable values "
"set by a model field 'choices' option named group."
)
self.assertContains(
response,
'<a href="%s">Horizontal</a>' % link1,
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="%s">Vertical</a>' % link2,
msg_prefix=fail_msg,
html=True,
)
def test_named_group_field_choices_filter(self):
"""
Ensures the filter UI shows correctly when at least one named group has
been used in the choices option of a model field.
"""
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist filter isn't showing options contained inside a model "
"field 'choices' option named group."
)
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(
response,
'<a href="?surface__exact=x">Horizontal</a>',
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="?surface__exact=y">Vertical</a>',
msg_prefix=fail_msg,
html=True,
)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
response = self.client.get(reverse("admin:admin_views_post_changelist"))
self.assertContains(response, "icon-unknown.svg")
def test_display_decorator_with_boolean_and_empty_value(self):
msg = (
"The boolean and empty_value arguments to the @display decorator "
"are mutually exclusive."
)
with self.assertRaisesMessage(ValueError, msg):
class BookAdmin(admin.ModelAdmin):
@admin.display(boolean=True, empty_value="(Missing)")
def is_published(self, obj):
return obj.publish_date is not None
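    # For contrast, a sketch of the two valid, mutually exclusive uses of
    # the @display arguments. "WorkingBookAdmin" is illustrative only and
    # is never registered with a site.
    def _example_display_decorator_valid_usage(self):
        class WorkingBookAdmin(admin.ModelAdmin):
            @admin.display(boolean=True)
            def is_published(self, obj):
                return obj.publish_date is not None

            @admin.display(empty_value="(Missing)")
            def published_on(self, obj):
                return obj.publish_date

        return WorkingBookAdmin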
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("en-us"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "Choisir une heure")
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertContains(response, "Choisir une heure")
def test_jsi18n_with_context(self):
response = self.client.get(reverse("admin-extra-context:jsi18n"))
self.assertEqual(response.status_code, 200)
def test_jsi18n_format_fallback(self):
"""
The JavaScript i18n view doesn't return localized date/time formats
when the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="ru"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "%d.%m.%Y %H:%M:%S")
self.assertContains(response, "%Y-%m-%d %H:%M:%S")
def test_disallowed_filtering(self):
with self.assertLogs("django.security.DisallowedModelAdminLookup", "ERROR"):
response = self.client.get(
"%s?owner__email__startswith=fuzzy"
% reverse("admin:admin_views_album_changelist")
)
self.assertEqual(response.status_code, 400)
# Filters are allowed if explicitly included in list_filter
response = self.client.get(
"%s?color__value__startswith=red"
% reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"%s?color__value=red" % reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
# Filters should be allowed if they involve a local field without the
# need to allow them in list_filter or date_hierarchy.
response = self.client.get(
"%s?age__gt=30" % reverse("admin:admin_views_person_changelist")
)
self.assertEqual(response.status_code, 200)
e1 = Employee.objects.create(
name="Anonymous", gender=1, age=22, alive=True, code="123"
)
e2 = Employee.objects.create(
name="Visitor", gender=2, age=19, alive=True, code="124"
)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
response = self.client.get(reverse("admin:admin_views_workhour_changelist"))
self.assertContains(response, "employee__person_ptr__exact")
response = self.client.get(
"%s?employee__person_ptr__exact=%d"
% (reverse("admin:admin_views_workhour_changelist"), e1.pk)
)
self.assertEqual(response.status_code, 200)
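    # A sketch (not a test) of the hook behind the checks above: whether a
    # query string lookup is honored is decided by
    # ModelAdmin.lookup_allowed(), which an admin can widen directly
    # instead of relying on list_filter. Illustrative class only.
    def _example_lookup_allowed_override(self):
        class ThingSketchAdmin(admin.ModelAdmin):
            def lookup_allowed(self, lookup, value):
                if lookup == "color__value__startswith":
                    return True
                return super().lookup_allowed(lookup, value)

        return ThingSketchAdmin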
def test_disallowed_to_field(self):
url = reverse("admin:admin_views_section_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "missing_field"})
self.assertEqual(response.status_code, 400)
        # Specifying a field that is not referred to by any other model
        # registered to this admin site should raise an exception.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(
reverse("admin:admin_views_section_changelist"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
# Primary key should always be allowed, even if the referenced model
# isn't registered.
response = self.client.get(
reverse("admin:admin_views_notreferenced_changelist"), {TO_FIELD_VAR: "id"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field referenced by another model through an m2m
        # should be allowed.
response = self.client.get(
reverse("admin:admin_views_recipe_changelist"), {TO_FIELD_VAR: "rname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field referenced through a reverse m2m relationship
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_ingredient_changelist"), {TO_FIELD_VAR: "iname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field that is not referred by any other model directly
# registered to this admin site but registered through inheritance
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyparent_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is only referred to by an inline of a
        # registered model should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyinline_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
# #25622 - Specifying a field of a model only referred by a generic
# relation should raise DisallowedModelAdminToField.
url = reverse("admin:admin_views_referencedbygenrel_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "object_id"})
self.assertEqual(response.status_code, 400)
# We also want to prevent the add, change, and delete views from
# leaking a disallowed field value.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(
reverse("admin:admin_views_section_add"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
section = Section.objects.create()
url = reverse("admin:admin_views_section_change", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
url = reverse("admin:admin_views_section_delete", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
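    # For reference, a sketch (not a test) of the kind of URL the checks
    # above guard: raw-id and popup widgets append TO_FIELD_VAR so the
    # admin knows which field the selected value should populate.
    def _example_to_field_url(self):
        return "%s?%s=id" % (
            reverse("admin:admin_views_section_changelist"),
            TO_FIELD_VAR,
        )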
def test_allowed_filtering_15103(self):
"""
        Regression test for ticket 15103 - filtering on fields defined in a
        ForeignKey's 'limit_choices_to' should be allowed; otherwise
        raw_id_fields can break.
"""
# Filters should be allowed if they are defined on a ForeignKey
# pointing to this model.
url = "%s?leader__name=Palin&leader__age=27" % reverse(
"admin:admin_views_inquisition_changelist"
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
Regression test for ticket 20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get(
"%s?%s" % (reverse("admin:admin_views_actor_changelist"), IS_POPUP_VAR)
)
self.assertContains(response, 'data-popup-opener="%s"' % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username="super")
user.set_unusable_password()
user.save()
self.client.force_login(user)
response = self.client.get(reverse("admin:index"))
self.assertNotContains(
response,
reverse("admin:password_change"),
msg_prefix=(
'The "change password" link should not be displayed if a user does not '
"have a usable password."
),
)
def test_change_view_with_show_delete_extra_context(self):
"""
The 'show_delete' context variable in the admin's change view controls
the display of the delete button.
"""
instance = UndeletableObject.objects.create(name="foo")
response = self.client.get(
reverse("admin:admin_views_undeletableobject_change", args=(instance.pk,))
)
self.assertNotContains(response, "deletelink")
def test_change_view_logs_m2m_field_changes(self):
"""Changes to ManyToManyFields are included in the object's history."""
pizza = ReadablePizza.objects.create(name="Cheese")
cheese = Topping.objects.create(name="cheese")
post_data = {"name": pizza.name, "toppings": [cheese.pk]}
response = self.client.post(
reverse("admin:admin_views_readablepizza_change", args=(pizza.pk,)),
post_data,
)
self.assertRedirects(
response, reverse("admin:admin_views_readablepizza_changelist")
)
pizza_ctype = ContentType.objects.get_for_model(
ReadablePizza, for_concrete_model=False
)
log = LogEntry.objects.filter(
content_type=pizza_ctype, object_id=pizza.pk
).first()
self.assertEqual(log.get_change_message(), "Changed Toppings.")
def test_allows_attributeerror_to_bubble_up(self):
"""
AttributeErrors are allowed to bubble when raised inside a change list
view. Requires a model to be created so there's something to display.
Refs: #16655, #18593, and #18747
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get(reverse("admin:admin_views_simple_changelist"))
def test_changelist_with_no_change_url(self):
"""
ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if url
for change_view is removed from get_urls (#20934).
"""
o = UnchangeableObject.objects.create()
response = self.client.get(
reverse("admin:admin_views_unchangeableobject_changelist")
)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(
response, '<th class="field-__str__">%s</th>' % o, html=True
)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = "/test_admin/admin/admin_views/"
confirm_good_url = reverse(
"admin:app_list", kwargs={"app_label": "admin_views"}
)
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", kwargs={"app_label": "this_should_fail"})
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", args=("admin_views2",))
def test_resolve_admin_views(self):
index_match = resolve("/test_admin/admin4/")
list_match = resolve("/test_admin/admin4/auth/user/")
self.assertIs(index_match.func.admin_site, customadmin.simple_site)
self.assertIsInstance(
list_match.func.model_admin, customadmin.CustomPwdTemplateUserAdmin
)
def test_adminsite_display_site_url(self):
"""
#13749 - Admin should display link to front-end site 'View site'
"""
url = reverse("admin:index")
response = self.client.get(url)
self.assertEqual(response.context["site_url"], "/my-site-url/")
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
def test_date_hierarchy_empty_queryset(self):
self.assertIs(Question.objects.exists(), False)
response = self.client.get(reverse("admin:admin_views_answer2_changelist"))
self.assertEqual(response.status_code, 200)
@override_settings(TIME_ZONE="America/Sao_Paulo", USE_TZ=True)
def test_date_hierarchy_timezone_dst(self):
# This datetime doesn't exist in this timezone due to DST.
for date in make_aware_datetimes(
datetime.datetime(2016, 10, 16, 15), "America/Sao_Paulo"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=16")
self.assertContains(response, "question__expires__month=10")
self.assertContains(response, "question__expires__year=2016")
@override_settings(TIME_ZONE="America/Los_Angeles", USE_TZ=True)
def test_date_hierarchy_local_date_differ_from_utc(self):
# This datetime is 2017-01-01 in UTC.
for date in make_aware_datetimes(
datetime.datetime(2016, 12, 31, 16), "America/Los_Angeles"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=31")
self.assertContains(response, "question__expires__month=12")
self.assertContains(response, "question__expires__year=2016")
def test_sortable_by_columns_subset(self):
expected_sortable_fields = ("date", "callable_year")
expected_not_sortable_fields = (
"content",
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
)
response = self.client.get(reverse("admin6:admin_views_article_changelist"))
for field_name in expected_sortable_fields:
self.assertContains(
response, '<th scope="col" class="sortable column-%s">' % field_name
)
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
def test_get_sortable_by_columns_subset(self):
response = self.client.get(reverse("admin6:admin_views_actor_changelist"))
self.assertContains(response, '<th scope="col" class="sortable column-age">')
self.assertContains(response, '<th scope="col" class="column-name">')
def test_sortable_by_no_column(self):
expected_not_sortable_fields = ("title", "book")
response = self.client.get(reverse("admin6:admin_views_chapter_changelist"))
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_get_sortable_by_no_column(self):
response = self.client.get(reverse("admin6:admin_views_color_changelist"))
self.assertContains(response, '<th scope="col" class="column-value">')
self.assertNotContains(response, '<th scope="col" class="sortable column')
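    # A sketch (not a test) of the two hooks the admins above use to limit
    # sortable columns. Illustrative classes only; the registered admins
    # live in admin.py.
    def _example_sortable_by_options(self):
        class ChapterSketchAdmin(admin.ModelAdmin):
            sortable_by = ()  # Static: no sortable columns.

        class ColorSketchAdmin(admin.ModelAdmin):
            def get_sortable_by(self, request):
                return ()  # Dynamic: computed per request.

        return ChapterSketchAdmin, ColorSketchAdmin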
def test_app_index_context(self):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
self.assertEqual(response.context["title"], "Admin_Views administration")
self.assertEqual(response.context["app_label"], "admin_views")
# Models are sorted alphabetically by default.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models))
def test_app_index_context_reordered(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
# Models are in reverse order.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models, reverse=True))
def test_change_view_subtitle_per_object(self):
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_view_subtitle_per_object(self):
viewuser = User.objects.create_user(
username="viewuser",
password="secret",
is_staff=True,
)
viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", Article._meta)),
)
self.client.force_login(viewuser)
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_formset_kwargs_can_be_overridden(self):
response = self.client.get(reverse("admin:admin_views_city_add"))
self.assertContains(response, "overridden_name")
def test_render_views_no_subtitle(self):
tests = [
reverse("admin:index"),
reverse("admin:password_change"),
reverse("admin:app_list", args=("admin_views",)),
reverse("admin:admin_views_article_delete", args=(self.a1.pk,)),
reverse("admin:admin_views_article_history", args=(self.a1.pk,)),
]
for url in tests:
with self.subTest(url=url):
with self.assertNoLogs("django.template", "DEBUG"):
self.client.get(url)
# Login must be after logout.
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:logout"))
self.client.get(reverse("admin:login"))
def test_render_delete_selected_confirmation_no_subtitle(self):
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": self.a1.pk,
}
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:admin_views_article_changelist"), post_data)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation."
"NumericPasswordValidator"
)
},
]
)
def test_password_change_helptext(self):
response = self.client.get(reverse("admin:password_change"))
self.assertContains(
response, '<div class="help" id="id_new_password1_helptext">'
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
        {
            "NAME": (
                "django.contrib.auth.password_validation."
                "NumericPasswordValidator"
            )
        },
],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
            # Put this app's and the shared tests' template dirs in DIRS to
            # take precedence over the admin's templates dir.
"DIRS": [
os.path.join(os.path.dirname(__file__), "templates"),
os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_custom_model_admin_templates(self):
# Test custom change list template with custom extra context
response = self.client.get(
reverse("admin:admin_views_customarticle_changelist")
)
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, "custom_admin/change_list.html")
# Test custom add form template
response = self.client.get(reverse("admin:admin_views_customarticle_add"))
self.assertTemplateUsed(response, "custom_admin/add_form.html")
# Add an article so we can test delete, change, and history views
post = self.client.post(
reverse("admin:admin_views_customarticle_add"),
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
},
)
self.assertRedirects(
post, reverse("admin:admin_views_customarticle_changelist")
)
self.assertEqual(CustomArticle.objects.count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get(
reverse("admin:admin_views_customarticle_change", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/change_form.html")
response = self.client.get(
reverse("admin:admin_views_customarticle_delete", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/delete_confirmation.html")
response = self.client.post(
reverse("admin:admin_views_customarticle_changelist"),
data={
"index": 0,
"action": ["delete_selected"],
"_selected_action": ["1"],
},
)
self.assertTemplateUsed(
response, "custom_admin/delete_selected_confirmation.html"
)
response = self.client.get(
reverse("admin:admin_views_customarticle_history", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/object_history.html")
# A custom popup response template may be specified by
# ModelAdmin.popup_response_template.
response = self.client.post(
reverse("admin:admin_views_customarticle_add") + "?%s=1" % IS_POPUP_VAR,
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
IS_POPUP_VAR: "1",
},
)
self.assertEqual(response.template_name, "custom_admin/popup_response.html")
def test_extended_bodyclass_template_change_form(self):
"""
The admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_change_password_template(self):
user = User.objects.get(username="super")
response = self.client.get(
reverse("admin:auth_user_password_change", args=(user.id,))
)
# The auth/user/change_password.html template uses super in the
# bodyclass block.
self.assertContains(response, "bodyclass_consistency_check ")
# When a site has multiple passwords in the browser's password manager,
        # a browser pop-up asks which user the new password is for. To prevent
# this, the username is added to the change password form.
self.assertContains(
response, '<input type="text" name="username" value="super" class="hidden">'
)
# help text for passwords has an id.
self.assertContains(
response,
'<div class="help" id="id_password1_helptext"><ul><li>'
"Your password can’t be too similar to your other personal information."
"</li><li>Your password can’t be entirely numeric.</li></ul></div>",
)
self.assertContains(
response,
'<div class="help" id="id_password2_helptext">'
"Enter the same password as before, for verification.</div>",
)
def test_extended_bodyclass_template_index(self):
"""
The admin/index.html template uses block.super in the bodyclass block.
"""
response = self.client.get(reverse("admin:index"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_change_list(self):
"""
        The admin/change_list.html template uses block.super
in the bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_login(self):
"""
The admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get(reverse("admin:login"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_confirmation(self):
"""
The admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get(reverse("admin:auth_group_delete", args=(group.id,)))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
The admin/delete_selected_confirmation.html template uses
block.super in bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": group.id,
}
response = self.client.post(reverse("admin:auth_group_changelist"), post_data)
self.assertEqual(response.context["site_header"], "Django administration")
self.assertContains(response, "bodyclass_consistency_check ")
def test_filter_with_custom_template(self):
"""
A custom template can be used to render an admin filter.
"""
response = self.client.get(reverse("admin:admin_views_color2_changelist"))
self.assertTemplateUsed(response, "custom_filter_template.html")
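# A sketch (assumed, mirroring what the Color2 filter in this test app's
# admin.py does to make test_filter_with_custom_template pass): a list
# filter opts into a custom template via its "template" attribute.
# Illustrative class only; it is not registered anywhere.
class CustomTemplateFilterSketch(admin.SimpleListFilter):
    title = "color"
    parameter_name = "color"
    template = "custom_filter_template.html"  # Rendered instead of admin/filter.html.

    def lookups(self, request, model_admin):
        return [("red", "Red")]

    def queryset(self, request, queryset):
        if self.value():
            return queryset.filter(value=self.value())
        return queryset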
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_change_form_URL_has_correct_value(self):
"""
change_view has form_url in response.context
"""
response = self.client.get(
reverse(
"admin:admin_views_section_change",
args=(self.s1.pk,),
current_app=self.current_app,
)
)
self.assertIn(
"form_url", response.context, msg="form_url not present in response.context"
)
self.assertEqual(response.context["form_url"], "pony")
def test_initial_data_can_be_overridden(self):
"""
The behavior for setting initial form data can be overridden in the
ModelAdmin class. Usually, the initial value is set via the GET params.
"""
response = self.client.get(
reverse("admin:admin_views_restaurant_add", current_app=self.current_app),
{"name": "test_value"},
)
# this would be the usual behaviour
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behaviour
self.assertContains(response, 'value="overridden_value"')
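# A sketch (assumed, mirroring what the "admin3" site's Section admin does
# to make form_url equal "pony" above): change_view() accepts a form_url
# argument that ends up in the template context. Illustrative class only.
class FormUrlSketchAdmin(admin.ModelAdmin):
    def change_view(self, request, object_id, form_url="", extra_context=None):
        # Force a custom form action URL into the rendered change form.
        return super().change_view(
            request, object_id, form_url="pony", extra_context=extra_context
        )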
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_js_minified_only_if_debug_is_false(self):
"""
The minified versions of the JS files are only used when DEBUG is False.
"""
with override_settings(DEBUG=False):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertNotContains(response, "vendor/jquery/jquery.js")
self.assertContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
with override_settings(DEBUG=True):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "vendor/jquery/jquery.js")
self.assertNotContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
@override_settings(ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_as_duplication(self):
"""'save as' creates a new person"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)), post_data
)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
new_person = Person.objects.latest("id")
self.assertRedirects(
response, reverse("admin:admin_views_person_change", args=(new_person.pk,))
)
def test_save_as_continue_false(self):
"""
Saving a new object using "Save as new" redirects to the changelist
instead of the change view when ModelAdmin.save_as_continue=False.
"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
url = reverse(
"admin:admin_views_person_change",
args=(self.per1.pk,),
current_app=site2.name,
)
response = self.client.post(url, post_data)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
self.assertRedirects(
response,
reverse("admin:admin_views_person_changelist", current_app=site2.name),
)
def test_save_as_new_with_validation_errors(self):
"""
        When you click "Save as new" and a validation error occurs, only the
        "Save as new" button is shown; the other save buttons are hidden.
"""
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)),
{
"_saveasnew": "",
"gender": "invalid",
"_addanother": "fail",
},
)
self.assertContains(response, "Please correct the errors below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "Child",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "_invalid",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "_invalid",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "Father",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
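# A sketch of the ModelAdmin options the SaveAsTests above exercise; the
# admins actually registered for these tests live in admin.py, so this
# class is illustrative only.
class SaveAsSketchAdmin(admin.ModelAdmin):
    save_as = True  # Adds the "Save as new" button to the change form.
    save_as_continue = False  # Redirect to the changelist after "Save as new".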
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post(
reverse("admin2:login"),
{
REDIRECT_FIELD_NAME: reverse("admin2:index"),
"username": "customform",
"password": "secret",
},
follow=True,
)
self.assertIsInstance(login, TemplateResponse)
self.assertContains(login, "custom form error")
self.assertContains(login, "path/to/media.css")
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/login.html")
self.assertContains(response, "Hello from a custom login template")
def test_custom_admin_site_logout_template(self):
response = self.client.post(reverse("admin2:logout"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/logout.html")
self.assertContains(response, "Hello from a custom logout template")
def test_custom_admin_site_index_view_and_template(self):
response = self.client.get(reverse("admin2:index"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/index.html")
self.assertContains(response, "Hello from a custom index template *bar*")
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/app_index.html")
self.assertContains(response, "Hello from a custom app_index template")
def test_custom_admin_site_password_change_template(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(
response, "Hello from a custom password change form template"
)
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(response, "eggs")
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get(reverse("admin2:password_change_done"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_done.html")
self.assertContains(
response, "Hello from a custom password change done template"
)
def test_custom_admin_site_view(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:my_view"))
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.force_login(self.superuser)
su = User.objects.get(username="super")
response = self.client.get(
reverse("admin4:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 200)
def get_perm(Model, codename):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model, for_concrete_model=False)
return Permission.objects.get(content_type=ct, codename=codename)
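# Example usage (a sketch mirroring the permission setup in
# AdminViewPermissionsTest below):
#
#   user.user_permissions.add(
#       get_perm(Article, get_permission_codename("view", Article._meta))
#   )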
@override_settings(
ROOT_URLCONF="admin_views.urls",
# Test with the admin's documented list of required context processors.
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.nostaffuser = User.objects.create_user(
username="nostaff", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
another_section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
        # Set up permissions for our users who can add, change, and delete.
opts = Article._meta
# User who can view Articles
cls.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", opts))
)
# User who can add Articles
cls.adduser.user_permissions.add(
get_perm(Article, get_permission_codename("add", opts))
)
# User who can change Articles
cls.changeuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
cls.nostaffuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
# User who can delete Articles
cls.deleteuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(Section, get_permission_codename("delete", Section._meta))
)
# login POST dicts
cls.index_url = reverse("admin:index")
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super",
"password": "secret",
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "secret",
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "notsecret",
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "adduser",
"password": "secret",
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "changeuser",
"password": "secret",
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "deleteuser",
"password": "secret",
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "nostaff",
"password": "secret",
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "joepublic",
"password": "secret",
}
cls.viewuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "viewuser",
"password": "secret",
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"password": "secret",
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Test login when the user enters an email address as the username.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username="jondoe", password="secret", email="[email protected]")
new_user.save()
        # Ensure a user doesn't get a 500 when multiple users share an email
        # address.
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# View User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.viewuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Regular User should not be able to log in.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
self.assertFormError(
login.context["form"], "username", ["This field is required."]
)
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse("admin:login"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
def test_login_has_permission(self):
        # Regular User should not be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.joepublic_login
)
self.assertContains(login, "permission denied")
# User with permissions should be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.nostaff_login
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
# Staff should be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"),
{
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "deleteuser",
"password": "secret",
},
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = "the-answer=42"
redirect_url = "%s?%s" % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
login = self.client.post(
"%s?%s" % (reverse("admin:login"), urlencode(new_next)), post_data
)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = "You are authenticated as {}"
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertNotContains(response, hint_template.format(""), status_code=200)
# Non-staff user should be shown the hint
self.client.force_login(self.nostaffuser)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertContains(
response, hint_template.format(self.nostaffuser.username), status_code=200
)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.post(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.post(reverse("admin:logout"))
        # Add user may log in and POST to the add view, then is redirected to
        # the admin root.
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_link = '› <a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix=(
"User restricted to add permission is given link to change list view "
"in breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.post(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix=(
"Unrestricted user is not given link to change list view in "
"breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.post(reverse("admin:logout"))
        # Refs #8509 - if a normal user is already logged in, it is possible
        # to switch to the superuser without error.
self.client.force_login(self.joepublicuser)
        # Make sure data still persists if the user session expires.
self.client.force_login(self.superuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
@mock.patch("django.contrib.admin.options.InlineModelAdmin.has_change_permission")
def test_add_view_with_view_only_inlines(self, has_change_permission):
"""User with add permission to a section but view-only for inlines."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("add", Section._meta))
)
self.client.force_login(self.viewuser)
# Valid POST creates a new section.
data = {
"name": "New obj",
"article_set-TOTAL_FORMS": 0,
"article_set-INITIAL_FORMS": 0,
}
response = self.client.post(reverse("admin:admin_views_section_add"), data)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(Section.objects.latest("id").name, data["name"])
# InlineModelAdmin.has_change_permission()'s obj argument is always
# None during object add.
self.assertEqual(
[obj for (request, obj), _ in has_change_permission.call_args_list],
[None, None],
)
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
change_dict = {
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
article_changelist_url = reverse("admin:admin_views_article_changelist")
        # The add user should not be able to view the list of articles or
        # change any of them.
self.client.force_login(self.adduser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view articles but not make changes.
self.client.force_login(self.viewuser)
response = self.client.get(article_changelist_url)
self.assertContains(
response,
"<title>Select article to view | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to view</h1>")
self.assertEqual(response.context["title"], "Select article to view")
response = self.client.get(article_change_url)
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<label>Extra form field:</label>")
self.assertContains(
response,
'<a href="/test_admin/admin/admin_views/article/" class="closelink">Close'
"</a>",
)
self.assertEqual(response.context["title"], "View article")
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.context["title"], "Select article to change")
self.assertContains(
response,
"<title>Select article to change | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to change</h1>")
response = self.client.get(article_change_url)
self.assertEqual(response.context["title"], "Change article")
self.assertContains(
response,
"<title>Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
)
        # One error in the form should produce the singular error message;
        # multiple errors should produce the plural.
change_dict["title"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the error below.",
msg_prefix=(
"Singular error message not found in response to post with one error"
),
)
change_dict["content"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the errors below.",
msg_prefix=(
"Plural error message not found in response to post with multiple "
"errors"
),
)
self.client.post(reverse("admin:logout"))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
change_url_1 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
)
change_url_2 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
)
change_url_3 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
)
change_url_6 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
)
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertRedirects(response, self.index_url)
response = self.client.get(change_url_3)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_3, {"name": "changed"})
self.assertEqual(response.status_code, 403)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=3).name,
"odd id mult 3",
)
response = self.client.get(change_url_6)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_6, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
)
self.assertRedirects(response, self.index_url)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_1, {"name": "changed"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertContains(response, "login-form")
response = self.client.get(change_url_2, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_2, {"name": "changed again"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_change_view_without_object_change_permission(self):
"""
The object should be read-only if the user has permission to view it
and change objects of that type but not to change the current object.
"""
change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
self.client.force_login(self.viewuser)
response = self.client.get(change_url)
self.assertEqual(response.context["title"], "View article")
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(
response,
'<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close'
"</a>",
)
def test_change_view_save_as_new(self):
"""
'Save as new' should raise PermissionDenied for users without the 'add'
permission.
"""
change_dict_save_as_new = {
"_saveasnew": "Save as new",
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
# Add user can perform "Save as new".
article_count = Article.objects.count()
self.client.force_login(self.adduser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), article_count + 1)
self.client.logout()
# Change user cannot perform "Save as new" (no 'add' permission).
article_count = Article.objects.count()
self.client.force_login(self.changeuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), article_count)
# User with both add and change permissions should be redirected to the
# change page for the newly created object.
article_count = Article.objects.count()
self.client.force_login(self.superuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(Article.objects.count(), article_count + 1)
new_article = Article.objects.latest("id")
self.assertRedirects(
post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
)
def test_change_view_with_view_only_inlines(self):
"""
User with change permission to a section but view-only for inlines.
"""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# Valid POST changes the name.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 3,
"article_set-INITIAL_FORMS": 3,
}
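        # Only the parent's fields plus the inline management form are needed:
        # the inline forms are read-only, so per-form article data would be
        # ignored anyway.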
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
def test_change_view_with_view_only_last_inline(self):
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("view", Section._meta))
)
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# The last inline is not marked as empty.
self.assertContains(response, 'id="article_set-2"')
def test_change_view_with_view_and_add_inlines(self):
"""User has view and add permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
# Valid POST creates a new article.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-3-id": [""],
"article_set-3-title": ["A title"],
"article_set-3-content": ["Added content"],
"article_set-3-date_0": ["2008-3-18"],
"article_set-3-date_1": ["11:54:58"],
"article_set-3-section": [str(self.s1.pk)],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
self.assertEqual(Article.objects.count(), 4)
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
def test_change_view_with_view_and_delete_inlines(self):
"""User has view and delete permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
# Inline POST details are ignored without delete permission.
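        # (Presumably the inline formset is built with can_delete=False here,
        # so the DELETE key in the POST data is simply dropped.)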
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 3)
# Deletion successful when delete permission is added.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", Article._meta))
)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 2)
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))
# add user should not be able to delete articles
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# view user should not be able to delete articles
self.client.force_login(self.viewuser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# Delete user can delete
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
# test response contains link to related Article
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
post = self.client.post(delete_url, delete_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, str(self.a1.pk))
def test_delete_view_with_no_default_permissions(self):
"""
The delete view allows users to delete collected objects without a
'delete' permission (ReadOnlyPizza.Meta.default_permissions is empty).
"""
pizza = ReadOnlyPizza.objects.create(name="Double Cheese")
delete_url = reverse("admin:admin_views_readonlypizza_delete", args=(pizza.pk,))
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/readonlypizza/%s/" % pizza.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Read only pizzas: 1</li>")
post = self.client.post(delete_url, {"post": "yes"})
self.assertRedirects(
post, reverse("admin:admin_views_readonlypizza_changelist")
)
self.assertEqual(ReadOnlyPizza.objects.count(), 0)
def test_delete_view_nonexistent_obj(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_article_delete", args=("nonexistent",))
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “nonexistent” doesn’t exist. Perhaps it was deleted?"],
)
def test_history_view(self):
"""History view should restrict access."""
        # add user should not be able to view the list of articles or change any of them
self.client.force_login(self.adduser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view all items
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
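        # Presumably the same fixture logic as in the change view tests above:
        # only even-id objects are changeable, so rl1 (id=1) is forbidden and
        # rl2 (id=2) is allowed for every staff user.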
rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_history_view_bad_url(self):
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=("foo",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “foo” doesn’t exist. Perhaps it was deleted?"],
)
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
self.client.force_login(self.adduser)
        # The user can't add sections yet, so they shouldn't see the
        # "add section" link.
url = reverse("admin:admin_views_article_add")
add_link_text = "add_id_section"
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the user to add sections too. Now they can see the "add section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("add", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_conditionally_show_change_section_link(self):
"""
The foreign key widget should only show the "change related" button if
the user has permission to change that related item.
"""
def get_change_related(response):
return (
response.context["adminform"]
.form.fields["section"]
.widget.can_change_related
)
self.client.force_login(self.adduser)
# The user can't change sections yet, so they shouldn't see the
# "change section" link.
url = reverse("admin:admin_views_article_add")
change_link_text = "change_id_section"
response = self.client.get(url)
self.assertFalse(get_change_related(response))
self.assertNotContains(response, change_link_text)
# Allow the user to change sections too. Now they can see the
# "change section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("change", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_change_related(response))
self.assertContains(response, change_link_text)
def test_conditionally_show_delete_section_link(self):
"""
The foreign key widget should only show the "delete related" button if
the user has permission to delete that related item.
"""
def get_delete_related(response):
return (
response.context["adminform"]
.form.fields["sub_section"]
.widget.can_delete_related
)
self.client.force_login(self.adduser)
# The user can't delete sections yet, so they shouldn't see the
# "delete section" link.
url = reverse("admin:admin_views_article_add")
delete_link_text = "delete_id_sub_section"
response = self.client.get(url)
self.assertFalse(get_delete_related(response))
self.assertNotContains(response, delete_link_text)
# Allow the user to delete sections too. Now they can see the
# "delete section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("delete", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_delete_related(response))
self.assertContains(response, delete_link_text)
def test_disabled_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_active = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_staff = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_list_permissions(self):
"""
If a user has no module perms, the app list returns a 404.
"""
opts = Article._meta
change_user = User.objects.get(username="changeuser")
permission = get_perm(Article, get_permission_codename("change", opts))
self.client.force_login(self.changeuser)
# the user has no module permissions
change_user.user_permissions.remove(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 404)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk="foo")
shortcut_url = reverse("admin:view_on_site", args=(model_ctype.pk, obj.pk))
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
# Logged in? Redirect.
self.client.force_login(self.superuser)
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
        # The domain may depend on whether the contrib.sites tests also run.
self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/")
def test_has_module_permission(self):
"""
        has_module_permission() returns True for all users who have any
        permission for that module (view, add, change, or delete), so that
        the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
def test_overriding_has_module_permission(self):
"""
If has_module_permission() always returns False, the module shouldn't
be displayed on the admin index page for any users.
"""
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
self.assertContains(response, sections)
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(index_url)
self.assertNotContains(response, articles)
# The app list displays Sections but not Articles as the latter has
# ModelAdmin.has_module_permission() = False.
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
self.assertContains(response, sections)
self.assertNotContains(response, articles)
def test_post_save_message_no_forbidden_links_visible(self):
"""
Post-save message shouldn't contain a link to the change form if the
user doesn't have the change permission.
"""
self.client.force_login(self.adduser)
# Emulate Article creation for user with add-only permission.
post_data = {
"title": "Fun & games",
"content": "Some content",
"date_0": "2015-10-31",
"date_1": "16:35:00",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_article_add"), post_data, follow=True
)
self.assertContains(
response,
'<li class="success">The article “Fun & games” was added successfully.'
"</li>",
html=True,
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewProxyModelPermissionsTests(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
# Setup permissions.
opts = UserProxy._meta
cls.viewuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("view", opts))
)
cls.adduser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("add", opts))
)
cls.changeuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("change", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("delete", opts))
)
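        # Proxy models have carried their own permissions (keyed to the
        # proxy's own content type) since Django 2.2, so these UserProxy
        # permissions are distinct from permissions on the concrete User
        # model.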
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(
username="user_proxy", password="secret"
)
def test_add(self):
self.client.force_login(self.adduser)
url = reverse("admin:admin_views_userproxy_add")
data = {
"username": "can_add",
"password": "secret",
"date_joined_0": "2019-01-15",
"date_joined_1": "16:59:10",
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username="can_add").exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_userproxy_changelist"))
self.assertContains(response, "<h1>Select user proxy to view</h1>")
response = self.client.get(
reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
)
self.assertContains(response, "<h1>View user proxy</h1>")
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
"password": self.user_proxy.password,
"username": self.user_proxy.username,
"date_joined_0": self.user_proxy.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": self.user_proxy.date_joined.strftime("%H:%M:%S"),
"first_name": "first_name",
}
url = reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(
response, reverse("admin:admin_views_userproxy_changelist")
)
self.assertEqual(
UserProxy.objects.get(pk=self.user_proxy.pk).first_name, "first_name"
)
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_userproxy_delete", args=(self.user_proxy.pk,))
response = self.client.post(url, {"post": "yes"}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.changeuser.user_permissions.add(
get_perm(Report, get_permission_codename("change", Report._meta))
)
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse("admin:index"))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.post(reverse("admin:logout"))
@skipUnlessDBFeature("can_defer_constraint_checks")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.v2 = Villain.objects.create(name="Sue")
cls.sv1 = SuperVillain.objects.create(name="Bob")
cls.pl1 = Plot.objects.create(
name="World Domination", team_leader=cls.v1, contact=cls.v2
)
cls.pl2 = Plot.objects.create(
name="World Peace", team_leader=cls.v2, contact=cls.v2
)
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
cls.pd1 = PlotDetails.objects.create(details="almost finished", plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(
location="underground bunker", villain=cls.v1
)
cls.sh2 = SecretHideout.objects.create(
location="floating castle", villain=cls.sv1
)
cls.ssh1 = SuperSecretHideout.objects.create(
location="super floating castle!", supervillain=cls.sv1
)
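        # The two cyclic rows below reference each other by id before the
        # other row exists, which is why this test class is decorated with
        # skipUnlessDBFeature("can_defer_constraint_checks"): the FK checks
        # must be deferrable to the end of the transaction.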
cls.cy1 = CyclicOne.objects.create(name="I am recursive", two_id=1)
cls.cy2 = CyclicTwo.objects.create(name="I am recursive too", one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>'
% (
reverse("admin:admin_views_plot_change", args=(self.pl1.pk,)),
reverse("admin:admin_views_plotdetails_change", args=(self.pd1.pk,)),
)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertRegex(response.content.decode(), pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
)
response = self.client.get(
reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
)
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username="deleteuser")
delete_user.user_permissions.add(
get_perm(Plot, get_permission_codename("delete", Plot._meta))
)
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(self.pl1.pk,))
)
self.assertContains(
response,
"your account doesn't have permission to delete the following types of "
"objects",
)
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(
reverse("admin:admin_views_question_delete", args=(q.pk,))
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question="Why?")
Answer.objects.create(question=q, answer="Because.")
response = self.client.post(
reverse("admin:admin_views_question_delete", args=(q.pk,)), {"post": "yes"}
)
self.assertEqual(Question.objects.count(), 1)
self.assertContains(
response, "would require deleting the following protected related objects"
)
def test_restricted(self):
album = Album.objects.create(title="Amaryllis")
song = Song.objects.create(album=album, name="Unity")
response = self.client.get(
reverse("admin:admin_views_album_delete", args=(album.pk,))
)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse("admin:admin_views_song_change", args=(song.pk,)),
)
def test_post_delete_restricted(self):
album = Album.objects.create(title="Amaryllis")
Song.objects.create(album=album, name="Unity")
response = self.client.post(
reverse("admin:admin_views_album_delete", args=(album.pk,)),
{"post": "yes"},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
"admin:admin_views_plot_change", args=(self.pl1.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
"admin:admin_views_plot_change", args=(self.pl2.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)),
"<li>Secret hideout: floating castle",
"<li>Super secret hideout: super floating castle!",
]
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(
reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name="hott")
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
"admin:admin_views_funkytag_change", args=(tag.id,)
)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(plot.pk,))
)
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has GenericForeignKey with
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name="djangoproject")
tag = FunkyTag.objects.create(content_object=bookmark, name="django")
tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(
reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,))
)
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
response = self.client.get(
reverse("admin2:admin_views_book_delete", args=(book.pk,))
)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name="hott")
response = self.client.get(reverse("admin:admin_views_funkytag_changelist"))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.pk = (
"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "
r"""-_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
)
cls.m1 = ModelWithStringPrimaryKey.objects.create(string_pk=cls.pk)
content_type_pk = ContentType.objects.get_for_model(
ModelWithStringPrimaryKey
).pk
user_pk = cls.superuser.pk
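        # action_flag=2 is django.contrib.admin.models.CHANGE, so this logs a
        # change entry for the string-pk object that the history view can show.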
LogEntry.objects.log_action(
user_pk,
content_type_pk,
cls.pk,
cls.pk,
2,
change_message="Changed something",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_get_history_view(self):
"""
        Retrieving the history for an object via the urlencoded form of its
        primary key should work.
Refs #12349, #18550.
"""
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_history", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
self.assertContains(response, "Changed something")
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
def test_changelist_to_changeform_link(self):
"""
        The link to an object's changeform in the changelist should use
        reverse() and be quoted.
"""
response = self.client.get(
reverse("admin:admin_views_modelwithstringprimarykey_changelist")
)
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
pk_final_url = escape(iri_to_uri(quote(self.pk)))
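        # quote() applies the admin's underscore-escaping to the pk,
        # iri_to_uri() percent-encodes any remaining non-ASCII characters, and
        # escape() HTML-escapes the result for comparison with the rendered
        # page.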
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", pk_final_url)
should_contain = '<th class="field-__str__"><a href="%s">%s</a></th>' % (
change_url,
escape(self.pk),
)
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"""
The link from the recent actions list referring to the changeform of
the object should be quoted.
"""
response = self.client.get(reverse("admin:index"))
link = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(quote(self.pk),)
)
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_deleteconfirmation_link(self):
""" "
The link from the delete confirmation page referring back to the
changeform of the object should be quoted.
"""
url = reverse(
"admin:admin_views_modelwithstringprimarykey_delete", args=(quote(self.pk),)
)
response = self.client.get(url)
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", escape(iri_to_uri(quote(self.pk))))
should_contain = '<a href="%s">%s</a>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add or is `add` should be visible"
add_model = ModelWithStringPrimaryKey.objects.create(
pk="i have something to add"
)
add_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
add_model2 = ModelWithStringPrimaryKey.objects.create(pk="add")
add_url = reverse("admin:admin_views_modelwithstringprimarykey_add")
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model2.pk),),
)
self.assertNotEqual(add_url, change_url)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(delete_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(history_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk="abc_123")
model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(model.pk),),
)
)
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse(
"admin:%s_modelwithstringprimarykey_change"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse(
"admin:%s_modelwithstringprimarykey_history"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
self.assertContains(
response, '<a href="%s" class="historylink"' % escape(expected_link)
)
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
reverse("admin:admin_views_modelwithstringprimarykey_add"),
{
"string_pk": "123/history",
"_continue": "1", # Save and continue editing
},
)
self.assertEqual(response.status_code, 302) # temporary redirect
self.assertIn("/123_2Fhistory/", response.headers["location"]) # PK is quoted
@override_settings(ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
def test_secure_view_shows_login_if_not_logged_in(self):
secure_url = reverse("secure_view")
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), secure_url)
)
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
        The staff_member_required decorator works with an argument
(redirect_field_name).
"""
secure_url = "/test_admin/admin/secure-view2/"
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?myfield=%s" % (reverse("admin:login"), secure_url)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.b1 = Book.objects.create(name="Lærdommer")
cls.p1 = Promo.objects.create(name="<Promo for Lærdommer>", book=cls.b1)
cls.chap1 = Chapter.objects.create(
title="Norske bostaver æøå skaper problemer",
content="<p>Svært frustrerende med UnicodeDecodeErro</p>",
book=cls.b1,
)
cls.chap2 = Chapter.objects.create(
title="Kjærlighet",
content="<p>La kjærligheten til de lidende seire.</p>",
book=cls.b1,
)
cls.chap3 = Chapter.objects.create(
title="Kjærlighet", content="<p>Noe innhold</p>", book=cls.b1
)
cls.chap4 = ChapterXtra1.objects.create(
chap=cls.chap1, xtra="<Xtra(1) Norske bostaver æøå skaper problemer>"
)
cls.chap5 = ChapterXtra1.objects.create(
chap=cls.chap2, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap6 = ChapterXtra1.objects.create(
chap=cls.chap3, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap7 = ChapterXtra2.objects.create(
chap=cls.chap1, xtra="<Xtra(2) Norske bostaver æøå skaper problemer>"
)
cls.chap8 = ChapterXtra2.objects.create(
chap=cls.chap2, xtra="<Xtra(2) Kjærlighet>"
)
cls.chap9 = ChapterXtra2.objects.create(
chap=cls.chap3, xtra="<Xtra(2) Kjærlighet>"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_unicode_edit(self):
"""
        POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": (
"<p>Svært frustrerende med UnicodeDecodeError</p>"
),
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": (
"<p>La kjærligheten til de lidende seire.</p>"
),
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(
reverse("admin:admin_views_book_change", args=(self.b1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
The delete_view handles non-ASCII characters
"""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_book_delete", args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse("admin:admin_views_book_changelist"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_inheritance(self):
Podcast.objects.create(
name="This Week in Django", release_date=datetime.date.today()
)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso="en", name="English", english_name="English")
response = self.client.get(reverse("admin:admin_views_language_changelist"))
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get(reverse("admin:admin_views_person_changelist"))
        # 2 inputs per object (the field and the hidden id field) = 6
        # 4 management hidden fields = 4
        # 4 action inputs (3 regular checkboxes, 1 checkbox to select all) = 4
        # main form submit button = 1
        # search field and search submit button = 2
        # CSRF fields = 2
        # field to track 'select all' across paginated views = 1
        # 6 + 4 + 4 + 1 + 2 + 2 + 1 = 20 inputs
        self.assertContains(response, "<input", count=20)
        # 1 select per object (3) + 1 action select = 4 selects
        self.assertContains(response, "<select", count=4)
def test_post_messages(self):
        # Ticket #12707: Saving the list_editable form should not show admin
        # action warnings.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data, follow=True
)
self.assertEqual(len(response.context["messages"]), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": str(self.per3.pk),
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?gender__exact=1", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?q=john", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
"""
Non-field errors are displayed for each of the forms in the
changelist's formset.
"""
fd1 = FoodDelivery.objects.create(
reference="123", driver="bill", restaurant="thai"
)
fd2 = FoodDelivery.objects.create(
reference="456", driver="bill", restaurant="india"
)
fd3 = FoodDelivery.objects.create(
reference="789", driver="bill", restaurant="pizza"
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
1,
html=True,
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
2,
html=True,
)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
non_form_errors = response.context["cl"].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
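        # error_class="nonform" renders these errors with the CSS class
        # "errorlist nonform", distinguishing non-form errors from per-form
        # ones.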
self.assertEqual(
str(non_form_errors),
str(ErrorList(["Grace is not a Zombie"], error_class="nonform")),
)
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_category_changelist"), data
)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Pagination works for list_editable items.
"""
UnorderedObject.objects.create(id=1, name="Unordered object #1")
UnorderedObject.objects.create(id=2, name="Unordered object #2")
UnorderedObject.objects.create(id=3, name="Unordered object #3")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist")
)
self.assertContains(response, "Unordered object #3")
self.assertContains(response, "Unordered object #2")
self.assertNotContains(response, "Unordered object #1")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist") + "?p=2"
)
self.assertNotContains(response, "Unordered object #3")
self.assertNotContains(response, "Unordered object #2")
self.assertContains(response, "Unordered object #1")
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ["3"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
"_selected_action": ["1"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get(reverse("admin:admin_views_person_changelist"))
self.assertNotEqual(response.context["cl"].list_editable, ())
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?%s" % IS_POPUP_VAR
)
self.assertEqual(response.context["cl"].list_editable, ())
def test_pk_hidden_fields(self):
"""
        Hidden pk fields aren't displayed in the table body; the corresponding
        human-readable value is displayed instead. The hidden pk fields are
        rendered separately (outside the table) and only once.
"""
story1 = Story.objects.create(
title="The adventures of Guido", content="Once upon a time in Djangoland..."
)
story2 = Story.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
response = self.client.get(reverse("admin:admin_views_story_changelist"))
        # Only one hidden field each, placed separately from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
"""Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(
title="The adventures of Guido",
content="Once upon a time in Djangoland...",
)
story2 = OtherStory.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
link1 = reverse("admin:admin_views_otherstory_change", args=(story1.pk,))
link2 = reverse("admin:admin_views_otherstory_change", args=(story2.pk,))
response = self.client.get(reverse("admin:admin_views_otherstory_changelist"))
# Only one hidden field, in a separate place from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id),
1,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id),
1,
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="John Doe", gender=1)
Person.objects.create(name='John O"Hara', gender=1)
Person.objects.create(name="John O'Hara", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text="Bar")
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text="Foo")
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text="Few")
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text="Bas")
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(
reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR
)
self.assertContains(response, "\n1 user\n")
self.assertContains(
response,
'<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR,
html=True,
)
def test_exact_matches(self):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=ba"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=uido"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=Bob"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=20"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">6 total</a>)</span>""",
html=True,
)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True,
)
self.assertTrue(response.context["cl"].show_admin_actions)
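# show_full_result_count is a plain ModelAdmin attribute; turning it off is
# all the behavior above requires (minimal hypothetical sketch):
#
#     class RecommendationAdmin(admin.ModelAdmin):
#         show_full_result_count = False  # renders "Show all" instead of a total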
def test_search_with_spaces(self):
url = reverse("admin:admin_views_person_changelist") + "?q=%s"
tests = [
('"John Doe"', 1),
("'John Doe'", 1),
("John Doe", 0),
('"John Doe" John', 1),
("'John Doe' John", 1),
("John Doe John", 0),
('"John Do"', 1),
("'John Do'", 1),
("'John O'Hara'", 0),
("'John O\\'Hara'", 1),
('"John O"Hara"', 0),
('"John O\\"Hara"', 1),
]
for search, hits in tests:
with self.subTest(search=search):
response = self.client.get(url % search)
self.assertContains(response, "\n%s person" % hits)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_inline(self):
"""
Inline models which inherit from a common parent are correctly handled.
"""
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
# test the add case
response = self.client.get(reverse("admin:admin_views_persona_add"))
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
# test submitting the add form
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse("admin:admin_views_persona_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(
reverse("admin:admin_views_persona_change", args=(persona_id,))
)
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post(
reverse("admin:admin_views_persona_change", args=(persona_id,)), post_data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
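# Note on the prefixes above: when two inlines resolve to the same default
# formset prefix (both account inlines reach Persona through the shared
# Account parent), the admin disambiguates the repeats by appending a counter,
# which is why the second inline posts under "accounts-2-*" (inferred from the
# observed form names).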
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post(reverse("admin:admin_views_gadget_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
# Hit the page once to flush the success messages out of the message queue
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
# Data is still not visible on the page
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
self.assertNotContains(response, "First Gadget")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_GET_parent_add(self):
"""
The add view of a parent model with inlines renders successfully.
"""
response = self.client.get(reverse("admin:admin_views_parent_add"))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.pks = [EmptyModel.objects.create().id for i in range(3)]
def setUp(self):
self.client.force_login(self.superuser)
self.super_login = {
REDIRECT_FIELD_NAME: reverse("admin:index"),
"username": "super",
"password": "secret",
}
def test_changelist_view(self):
response = self.client.get(reverse("admin:admin_views_emptymodel_changelist"))
for i in self.pks:
if i > 1:
self.assertContains(response, "Primary key = %s" % i)
else:
self.assertNotContains(response, "Primary key = %s" % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name="person1", gender=1)
Person.objects.create(name="person2", gender=2)
changelist_url = reverse("admin:admin_views_person_changelist")
# 5 queries are expected: 1 for the session, 1 for the user,
# 2 for the counts and 1 for the objects on the page
with self.assertNumQueries(5):
resp = self.client.get(changelist_url)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"q": "not_in_name"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 0 selected")
self.assertEqual(resp.context["selection_note_all"], "All 0 selected")
with self.assertNumQueries(5):
extra = {"q": "person"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"gender__exact": "1"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 1 selected")
self.assertEqual(resp.context["selection_note_all"], "1 selected")
def test_change_view(self):
for i in self.pks:
url = reverse("admin:admin_views_emptymodel_change", args=(i,))
response = self.client.get(url, follow=True)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["empty model with ID “1” doesn’t exist. Perhaps it was deleted?"],
)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_coverletter_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = CoverLetter.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"Candidate, Best</a>” was added successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_shortmessage_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
sm = ShortMessage.objects.all()[0]
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
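# The admins under test here follow the #14529 pattern of narrowing the
# changelist queryset in get_queryset(). A minimal sketch (hypothetical field
# name):
#
#     class CoverLetterAdmin(admin.ModelAdmin):
#         def get_queryset(self, request):
#             return super().get_queryset(request).defer("date_written")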
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = Telegram.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Urgent telegram</a>” was added successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
p = Paper.objects.all()[0]
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
url = reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"John Doe II</a>” was changed successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(cl.pk,)),
html=True,
)
# model has no __str__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
url = reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
t = Telegram.objects.create(title="First Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_telegram_change", args=(t.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_change", args=(t.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Telegram without typo</a>” was changed successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(t.pk,)),
html=True,
)
# model has no __str__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_paper_change", args=(p.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_change", args=(p.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_history_view_custom_qs(self):
"""
Custom querysets are considered for the admin history view.
"""
self.client.post(reverse("admin:login"), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get(
reverse("admin:admin_views_filteredmanager_changelist")
)
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(1,))
).status_code,
200,
)
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(2,))
).status_code,
200,
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
file1.write(b"a" * (2**21))
filename = file1.name
file1.close()
cls.gallery = Gallery.objects.create(name="Test Gallery")
cls.picture = Picture.objects.create(
name="Test Picture",
image=filename,
gallery=cls.gallery,
)
def setUp(self):
self.client.force_login(self.superuser)
def test_form_has_multipart_enctype(self):
response = self.client.get(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,))
)
self.assertIs(response.context["has_file_field"], True)
self.assertContains(response, MULTIPART_ENCTYPE)
def test_inline_file_upload_edit_validation_error_post(self):
"""
Inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": str(self.picture.id),
"pictures-0-gallery": str(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,)),
post_data,
)
self.assertContains(response, b"Currently")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.collector = Collector.objects.create(pk=1, name="John Fowles")
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
self.client.force_login(self.superuser)
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data["widget_set-0-name"] = "Widget 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# No file or image fields, no enctype on the forms
self.assertIs(response.context["has_file_field"], False)
self.assertNotContains(response, MULTIPART_ENCTYPE)
# Now resave that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"""
A model with an explicit autofield primary key can be saved as an inline.
"""
# First add a new inline
self.post_data["grommet_set-0-name"] = "Grommet 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""
An inline with an editable ordering field is updated correctly.
"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update(
{
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
}
)
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
@override_settings(ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
def setUp(self):
self.client.force_login(self.superuser)
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse("admin:admin_views_fabric_add"))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(
reverse("admin:admin_views_section_history", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.post(reverse("admin:logout"))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse("admin:password_change"))
self.assertIsNone(get_max_age(response))
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse("admin:password_change_done"))
self.assertIsNone(get_max_age(response))
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse("admin:jsi18n"))
self.assertIsNone(get_max_age(response))
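# get_max_age() parses the Cache-Control max-age directive. Admin views
# wrapped in never_cache respond with
# "max-age=0, no-cache, no-store, must-revalidate, private", hence the 0s
# above; views that send no max-age yield None.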
@override_settings(ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_prepopulated_on(self):
response = self.client.get(reverse("admin:admin_views_prepopulatedpost_add"))
self.assertContains(response, ""id": "#id_slug"")
self.assertContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
def test_prepopulated_off(self):
response = self.client.get(
reverse("admin:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, ""id": "#id_slug"")
self.assertNotContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertNotContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
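# Both tests above assume an admin wired roughly like this minimal sketch
# (hypothetical; the actual PrePopulatedPostAdmin also covers the inline
# subpost slug):
#
#     class PrePopulatedPostAdmin(admin.ModelAdmin):
#         prepopulated_fields = {"slug": ("title",)}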
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get(
reverse("admin:admin_views_prepopulatedpostlargeslug_add")
)
self.assertContains(response, ""maxLength": 1000") # instead of 1,000
def test_view_only_add_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'
which is present in the add view, even if
ModelAdmin.has_change_permission() returns False.
"""
response = self.client.get(reverse("admin7:admin_views_prepopulatedpost_add"))
self.assertContains(response, "data-prepopulated-fields=")
self.assertContains(response, ""id": "#id_slug"")
def test_view_only_change_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'. That
doesn't break a view-only change view.
"""
response = self.client.get(
reverse("admin7:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, 'data-prepopulated-fields="[]"')
self.assertContains(response, '<div class="readonly">%s</div>' % self.p1.slug)
@override_settings(ROOT_URLCONF="admin_views.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
self.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def test_login_button_centered(self):
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
button = self.selenium.find_element(By.CSS_SELECTOR, ".submit-row input")
offset_left = button.get_property("offsetLeft")
offset_right = button.get_property("offsetParent").get_property(
"offsetWidth"
) - (offset_left + button.get_property("offsetWidth"))
# Use assertAlmostEqual to avoid pixel rounding errors.
self.assertAlmostEqual(offset_left, offset_right, delta=3)
def test_prepopulated_fields(self):
"""
The JavaScript-automated prepopulated fields work with the main form
and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_mainprepopulated_add")
)
self.wait_for(".select2")
# Main form ----------------------------------------------------------
self.selenium.find_element(By.ID, "id_pubdate").send_keys("2012-02-18")
self.select_option("#id_status", "option two")
self.selenium.find_element(By.ID, "id_name").send_keys(
" the mAin nÀMë and it's awεšomeıııİ"
)
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
slug3 = self.selenium.find_element(By.ID, "id_slug3").get_attribute("value")
self.assertEqual(slug1, "the-main-name-and-its-awesomeiiii-2012-02-18")
self.assertEqual(slug2, "option-two-the-main-name-and-its-awesomeiiii")
self.assertEqual(
slug3, "the-main-n\xe0m\xeb-and-its-aw\u03b5\u0161ome\u0131\u0131\u0131i"
)
# Stacked inlines with fieldsets -------------------------------------
# Initial inline
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-pubdate"
).send_keys("2011-12-17")
self.select_option("#id_relatedprepopulated_set-0-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-name"
).send_keys(" here is a sŤāÇkeð inline ! ")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "here-is-a-stacked-inline-2011-12-17")
self.assertEqual(slug2, "option-one-here-is-a-stacked-inline")
initial_select2_inputs = self.selenium.find_elements(
By.CLASS_NAME, "select2-selection"
)
# Inline formsets have empty/invisible forms.
# Only the 4 visible select2 inputs are initialized.
num_initial_select2_inputs = len(initial_select2_inputs)
self.assertEqual(num_initial_select2_inputs, 4)
# Add an inline
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
0
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 2,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-pubdate"
).send_keys("1999-01-25")
self.select_option("#id_relatedprepopulated_set-1-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-name"
).send_keys(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog "
"text... "
)
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug2"
).get_attribute("value")
# 50 characters maximum for slug1 field
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
# 60 characters maximum for slug2 field
self.assertEqual(
slug2, "option-two-now-you-have-another-stacked-inline-with-a-very-l"
)
# Tabular inlines ----------------------------------------------------
# Initial inline
element = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-status"
)
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-pubdate"
).send_keys("1234-12-07")
self.select_option("#id_relatedprepopulated_set-2-0-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-name"
).send_keys("And now, with a tÃbűlaŘ inline !!!")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "and-now-with-a-tabular-inline-1234-12-07")
self.assertEqual(slug2, "option-two-and-now-with-a-tabular-inline")
# Add an inline
# Button may be outside the browser frame.
element = self.selenium.find_elements(
By.LINK_TEXT, "Add another Related prepopulated"
)[1]
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
element.click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 4,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-pubdate"
).send_keys("1981-08-22")
self.select_option("#id_relatedprepopulated_set-2-1-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-name"
).send_keys(r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug2"
).get_attribute("value")
self.assertEqual(slug1, "tabular-inline-with-ignored-characters-1981-08-22")
self.assertEqual(slug2, "option-one-tabular-inline-with-ignored-characters")
# Add an inline without an initial inline.
# The button is outside of the browser frame.
self.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
2
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 6,
)
# Stacked Inlines without fieldsets ----------------------------------
# Initial inline.
row_id = "id_relatedprepopulated_set-4-0-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("2011-12-12")
self.select_option(f"#{row_id}status", "option one")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" sŤāÇkeð inline ! "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "stacked-inline-2011-12-12")
self.assertEqual(slug2, "option-one")
# Add inline.
self.selenium.find_elements(
By.LINK_TEXT,
"Add another Related prepopulated",
)[3].click()
row_id = "id_relatedprepopulated_set-4-1-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("1999-01-20")
self.select_option(f"#{row_id}status", "option two")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" now you haVe anöther sŤāÇkeð inline with a very loooong "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
self.assertEqual(slug2, "option-two")
# Save and check that everything is properly stored in the database
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.assertEqual(MainPrepopulated.objects.count(), 1)
MainPrepopulated.objects.get(
name=" the mAin nÀMë and it's awεšomeıııİ",
pubdate="2012-02-18",
status="option two",
slug1="the-main-name-and-its-awesomeiiii-2012-02-18",
slug2="option-two-the-main-name-and-its-awesomeiiii",
slug3="the-main-nàmë-and-its-awεšomeıııi",
)
self.assertEqual(RelatedPrepopulated.objects.count(), 6)
RelatedPrepopulated.objects.get(
name=" here is a sŤāÇkeð inline ! ",
pubdate="2011-12-17",
status="option one",
slug1="here-is-a-stacked-inline-2011-12-17",
slug2="option-one-here-is-a-stacked-inline",
)
RelatedPrepopulated.objects.get(
# 75 characters in name field
name=(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooo"
),
pubdate="1999-01-25",
status="option two",
slug1="now-you-have-another-stacked-inline-with-a-very-lo",
slug2="option-two-now-you-have-another-stacked-inline-with-a-very-l",
)
RelatedPrepopulated.objects.get(
name="And now, with a tÃbűlaŘ inline !!!",
pubdate="1234-12-07",
status="option two",
slug1="and-now-with-a-tabular-inline-1234-12-07",
slug2="option-two-and-now-with-a-tabular-inline",
)
RelatedPrepopulated.objects.get(
name=r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate="1981-08-22",
status="option one",
slug1="tabular-inline-with-ignored-characters-1981-08-22",
slug2="option-one-tabular-inline-with-ignored-characters",
)
def test_populate_existing_object(self):
"""
The prepopulation works for existing objects too, as long as
the original field is empty (#19082).
"""
from selenium.webdriver.common.by import By
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=" this is the mAin nÀMë",
pubdate="2012-02-18",
status="option two",
slug1="",
slug2="",
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
object_url = self.live_server_url + reverse(
"admin:admin_views_mainprepopulated_change", args=(item.id,)
)
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" the best")
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
# Save the object
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" hello")
# The slugs didn't change since they were no longer empty
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
def test_collapsible_fieldset(self):
"""
The 'collapse' class in a fieldsets definition allows showing/hiding the
appropriate field section.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_add")
)
self.assertFalse(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
self.assertTrue(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "fieldsetcollapser0").text, "Hide"
)
def test_selectbox_height_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_pizza_add")
self.selenium.get(url)
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
filter_box = self.selenium.find_element(By.ID, "id_toppings_filter")
from_box = self.selenium.find_element(By.ID, "id_toppings_from")
to_box = self.selenium.find_element(By.ID, "id_toppings_to")
self.assertEqual(
to_box.get_property("offsetHeight"),
(
filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_selectbox_height_not_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_question_add")
self.selenium.get(url)
filter_box = self.selenium.find_element(By.ID, "id_related_questions_filter")
from_box = self.selenium.find_element(By.ID, "id_related_questions_from")
to_box = self.selenium.find_element(By.ID, "id_related_questions_to")
self.assertEqual(
to_box.get_property("offsetHeight"),
(
filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
from selenium.webdriver.common.by import By
# First form field has a single widget
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_picture_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_name"),
)
# First form field has a MultiWidget
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_reservation_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_start_date_0"),
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)
def test_list_editable_popups(self):
"""
list_editable foreign keys have add/change popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
s1 = Section.objects.create(name="Test section")
Article.objects.create(
title="foo",
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=s1,
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_changelist")
)
# Change popup
self.selenium.find_element(By.ID, "change_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Change section")
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.clear()
name_input.send_keys("<i>edited section</i>")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Hide sidebar.
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "<i>edited section</i>")
# Rendered select2 input.
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\n<i>edited section</i>")
# Add popup
self.selenium.find_element(By.ID, "add_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Add section")
self.selenium.find_element(By.ID, "id_name").send_keys("new section")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "new section")
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\nnew section")
def test_inline_uuid_pk_edit_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "change_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(select.first_selected_option.text, str(parent.id))
self.assertEqual(
select.first_selected_option.get_attribute("value"), str(parent.id)
)
def test_inline_uuid_pk_add_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_views_relatedwithuuidpkmodel_add")
)
self.selenium.find_element(By.ID, "add_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
uuid_id = str(ParentWithUUIDPK.objects.first().id)
self.assertEqual(select.first_selected_option.text, uuid_id)
self.assertEqual(select.first_selected_option.get_attribute("value"), uuid_id)
def test_inline_uuid_pk_delete_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Yes, I’m sure"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(ParentWithUUIDPK.objects.count(), 0)
self.assertEqual(select.first_selected_option.text, "---------")
self.assertEqual(select.first_selected_option.get_attribute("value"), "")
def test_inline_with_popup_cancel_delete(self):
"""Clicking ""No, take me back" on a delete popup closes the window."""
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//a[text()="No, take me back"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertEqual(len(self.selenium.window_handles), 1)
def test_list_editable_raw_id_fields(self):
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
parent2 = ParentWithUUIDPK.objects.create(title="test2")
RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_changelist",
current_app=site2.name,
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "lookup_id_form-0-parent").click()
self.wait_for_and_switch_to_popup()
# Select "parent2" in the popup.
self.selenium.find_element(By.LINK_TEXT, str(parent2.pk)).click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# The newly selected pk should appear in the raw id input.
value = self.selenium.find_element(By.ID, "id_form-0-parent").get_attribute(
"value"
)
self.assertEqual(value, str(parent2.pk))
def test_input_element_font(self):
"""
Browsers' default stylesheets override the font of inputs. The admin
adds additional CSS to handle this.
"""
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
element = self.selenium.find_element(By.ID, "id_username")
# Some browsers quotes the fonts, some don't.
fonts = [
font.strip().strip('"')
for font in element.value_of_css_property("font-family").split(",")
]
self.assertEqual(
fonts,
["Roboto", "Lucida Grande", "Verdana", "Arial", "sans-serif"],
)
def test_search_input_filtered_page(self):
from selenium.webdriver.common.by import By
Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="Grace Hopper", gender=1, alive=False)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
person_url = reverse("admin:admin_views_person_changelist") + "?q=Gui"
self.selenium.get(self.live_server_url + person_url)
self.assertGreater(
self.selenium.find_element(By.ID, "searchbar").rect["width"],
50,
)
def test_related_popup_index(self):
"""
Create a chain of 'self' related objects via popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
base_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
popup_window_test = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
popup_window_test2 = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.selenium.find_element(By.ID, "id_title").send_keys("test3")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test2)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test3").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test2").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(base_window)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
def test_related_popup_incorrect_close(self):
"""
Cleanup child popups when closing a parent popup.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
test_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
test2_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.assertEqual(len(self.selenium.window_handles), 4)
self.selenium.switch_to.window(test2_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 2, 1)
self.assertEqual(len(self.selenium.window_handles), 2)
# Close final popup to clean up test.
self.selenium.switch_to.window(test_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 1, 1)
self.selenium.switch_to.window(self.selenium.window_handles[-1])
def test_hidden_fields_small_window(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
self.selenium.get(self.live_server_url + reverse("admin:admin_views_story_add"))
field_title = self.selenium.find_element(By.CLASS_NAME, "field-title")
current_size = self.selenium.get_window_size()
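# The title field is expected to stay hidden at any viewport size, from
# a desktop window down to a narrow one.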
try:
self.selenium.set_window_size(1024, 768)
self.assertIs(field_title.is_displayed(), False)
self.selenium.set_window_size(767, 575)
self.assertIs(field_title.is_displayed(), False)
finally:
self.selenium.set_window_size(current_size["width"], current_size["height"])
def test_updating_related_objects_updates_fk_selects(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
born_country_select_id = "id_born_country"
living_country_select_id = "id_living_country"
favorite_country_to_vacation_select_id = "id_favorite_country_to_vacation"
continent_select_id = "id_continent"
def _get_HTML_inside_element_by_id(id_):
return self.selenium.find_element(By.ID, id_).get_attribute("innerHTML")
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_traveler_add")
self.selenium.get(self.live_server_url + add_url)
# Add new Country from the born_country select.
self.selenium.find_element(By.ID, f"add_{born_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Argentina")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("South America")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
""",
)
# Argentina won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add new Country from the living_country select.
self.selenium.find_element(By.ID, f"add_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Spain")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Europe")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Spain</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Spain</option>
""",
)
# Spain won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Edit second Country created from living_country select.
favorite_select = Select(
self.selenium.find_element(By.ID, living_country_select_id)
)
favorite_select.select_by_visible_text("Spain")
self.selenium.find_element(By.ID, f"change_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.clear()
favorite_name_input.send_keys("Italy")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Italy</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Italy</option>
""",
)
# favorite_country_to_vacation field has no options.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add a new Asian country.
self.selenium.find_element(
By.ID, f"add_{favorite_country_to_vacation_select_id}"
).click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.send_keys("Qatar")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Asia")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Submit the new Traveler.
self.selenium.find_element(By.CSS_SELECTOR, '[name="_save"]').click()
traveler = Traveler.objects.get()
self.assertEqual(traveler.born_country.name, "Argentina")
self.assertEqual(traveler.living_country.name, "Italy")
self.assertEqual(traveler.favorite_country_to_vacation.name, "Qatar")
def test_redirect_on_add_view_add_another_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 2")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 2)
def test_redirect_on_add_view_continue_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and continue editing"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input_value = name_input.get_attribute("value")
self.assertEqual(name_input_value, "Test section 1")
@override_settings(ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(AdminFieldExtractionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_readonly_get(self):
response = self.client.get(reverse("admin:admin_views_post_add"))
self.assertNotContains(response, 'name="posted"')
# 3 fields + 2 submit buttons + 5 inline management form fields, + 2
# hidden fields for inlines + 1 field for the inline + 2 empty form
# + 1 logout form.
self.assertContains(response, "<input", count=17)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response, "<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Multiline text in a readonly field gets <br> tags
self.assertContains(response, "Multiline<br>test<br>string")
self.assertContains(
response,
'<div class="readonly">Multiline<br>html<br>content</div>',
html=True,
)
self.assertContains(response, "InlineMultiline<br>test<br>string")
self.assertContains(
response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)),
)
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<div class="help"', 3)
self.assertContains(
response,
'<div class="help" id="id_title_helptext">Some help text for the title '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help" id="id_content_helptext">Some help text for the content '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help">Some help text for the date (with Unicode ŠĐĆŽćžšđ)'
"</div>",
html=True,
)
p = Post.objects.create(
title="I worked on readonly_fields", content="Its good stuff"
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_text_field(self):
p = Post.objects.create(
title="Readonly test",
content="test",
readonly_content="test\r\n\r\ntest\r\n\r\ntest\r\n\r\ntest",
)
Link.objects.create(
url="http://www.djangoproject.com",
post=p,
readonly_link_content="test\r\nlink",
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
# Checking readonly field.
self.assertContains(response, "test<br><br>test<br><br>test<br><br>test")
# Checking readonly field in inline.
self.assertContains(response, "test<br>link")
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
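# A value submitted for the read-only "posted" field is ignored; the
# field keeps its default value (today).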
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by("-id")[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse("admin:admin_views_pizza_add"))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(
reverse("admin2:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get(
reverse("admin:admin_views_choice_change", args=(choice.pk,))
)
self.assertContains(
response, '<div class="readonly">No opinion</div>', html=True
)
def _test_readonly_foreignkey_links(self, admin_site):
"""
ForeignKey readonly fields render as links if the target model is
registered in admin.
"""
chapter = Chapter.objects.create(
title="Chapter 1",
content="content",
book=Book.objects.create(name="Book 1"),
)
language = Language.objects.create(iso="_40", name="Test")
obj = ReadOnlyRelatedField.objects.create(
chapter=chapter,
language=language,
user=self.superuser,
)
response = self.client.get(
reverse(
f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,)
),
)
# Related ForeignKey object registered in admin.
user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
f"{admin_site}:admin_views_language_change",
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(
response, '<div class="readonly">Chapter 1</div>', html=True
)
def test_readonly_foreignkey_links_default_admin_site(self):
self._test_readonly_foreignkey_links("admin")
def test_readonly_foreignkey_links_custom_admin_site(self):
self._test_readonly_foreignkey_links("namespaced_admin")
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(reverse("admin:admin_views_topping_add"))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(
reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
)
self.assertContains(response, "<label>Toppings:</label>", html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name="Adam")
pl = Plot.objects.create(name="Test Plot", team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details="Brand New Plot", plot=pl)
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
pd_url = reverse("admin:admin_views_plotdetails_change", args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
self.assertEqual(field.contents(), "-") # default empty value
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(
reverse("admin:admin_views_fieldoverridepost_change", args=(p.pk,))
)
self.assertContains(
response,
'<div class="help">Overridden help text for the date</div>',
html=True,
)
self.assertContains(
response,
'<label for="id_public">Overridden public label:</label>',
html=True,
)
self.assertNotContains(
response, "Some help text for the date (with Unicode ŠĐĆŽćžšđ)"
)
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name="<a>evil</a>")
response = self.client.get(
reverse("admin:admin_views_section_change", args=(section.pk,))
)
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name="Americano")
url = reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
with self.settings(LANGUAGE_CODE="fr"):
response = self.client.get(url)
self.assertContains(response, "<label>Toppings\u00A0:</label>", html=True)
@override_settings(ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get(reverse("admin:admin_views_stumpjoke_add"))
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)
@override_settings(ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
Inquisition.objects.create(expected=False, leader=actor, country="Spain")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content
)
self.assertTrue(m) # Got a match
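# hrefs are HTML-escaped in the rendered page, so unescape the
# ampersands before following the link.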
popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
# lookup query string; in model we define inquisition field to have a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
# lookup query string; in model we define defendant0 field to have a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
# lookup query string; in model we define defendant1 field to have a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
def test_list_display_method_same_name_as_reverse_accessor(self):
"""
Should be able to use a ModelAdmin method in list_display that has the
same name as a reverse model field ("sketch" in this case).
"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
response = self.client.get(reverse("admin:admin_views_inquisition_changelist"))
self.assertContains(response, "list-display-sketch")
@override_settings(ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
},
)
new_user = User.objects.get(username="newuser")
self.assertRedirects(
response, reverse("admin:auth_user_change", args=(new_user.pk,))
)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_continue": "1",
},
)
new_user = User.objects.get(username="newuser")
new_user_url = reverse("admin:auth_user_change", args=(new_user.pk,))
self.assertRedirects(response, new_user_url, fetch_redirect_response=False)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
response = self.client.get(new_user_url)
self.assertContains(
response,
'<li class="success">The user “<a href="%s">'
"%s</a>” was added successfully. You may edit it again below.</li>"
% (new_user_url, new_user),
html=True,
)
def test_password_mismatch(self):
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "mismatch",
},
)
self.assertEqual(response.status_code, 200)
self.assertFormError(response.context["adminform"], "password1", [])
self.assertFormError(
response.context["adminform"],
"password2",
["The two password fields didn’t match."],
)
def test_user_fk_add_popup(self):
"""
User addition through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(response, reverse("admin:auth_user_add"))
self.assertContains(
response,
'class="related-widget-wrapper-link add-related" id="add_id_owner"',
)
response = self.client.get(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR
)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR, data, follow=True
)
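# The popup response embeds the result as HTML-escaped JSON, hence the
# &quot; entities in the assertion below.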
self.assertContains(response, ""obj": "newuser"")
def test_user_fk_change_popup(self):
"""
User change through a FK popup should return the appropriate JavaScript
response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_change", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_change", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""obj": "newuser"")
self.assertContains(response, ""action": "change"")
def test_user_fk_delete_popup(self):
"""
User deletion through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_delete", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_delete", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = {
"post": "yes",
IS_POPUP_VAR: "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""action": "delete"")
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_addanother": "1",
},
)
new_user = User.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_user_add"))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
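# Under TestCase the view's atomic() block becomes a savepoint, adding
# SAVEPOINT/RELEASE queries on backends that support them.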
expected_num_queries = 10 if connection.features.uses_savepoints else 8
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_user_change", args=(u.pk,)))
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get(
reverse("admin3:auth_user_password_change", args=(u.pk,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["form_url"], "pony")
@override_settings(ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(
reverse("admin:auth_group_add"),
{
"name": "newgroup",
},
)
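# Fetching the newest group raises IndexError if it wasn't created.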
Group.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_group_changelist"))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
expected_num_queries = 8 if connection.features.uses_savepoints else 6
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_group_change", args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_field_prefix_css_classes(self):
"""
Fields have a CSS class name with a 'field-' prefix.
"""
response = self.client.get(reverse("admin:admin_views_post_add"))
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
CSS class names are used for each app and model on the admin index
pages (#17050).
"""
# General index page
response = self.client.get(reverse("admin:index"))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
Ensure app and model tag are correctly read by change_form template
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
Ensure app and model tag are correctly read by change_list template
"""
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
Ensure app and model tag are correctly read by delete_confirmation
template
"""
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
Ensure app and model tag are correctly read by app_index template
"""
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
Ensure app and model tag are correctly read by
delete_selected_confirmation template
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_section_changelist"), action_data
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their
class attribute.
"""
Podcast.objects.create(name="Django Dose", release_date=datetime.date.today())
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertContains(response, '<th class="field-name">')
self.assertContains(response, '<td class="field-release_date nowrap">')
self.assertContains(response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
class AdminDocsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_tags(self):
response = self.client.get(reverse("django-admindocs-tags"))
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(
response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#built_in-autoescape">autoescape</a></li>',
html=True,
)
# An app tag exists in both the index and detail
self.assertContains(
response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>',
html=True,
)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(
response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#admin_list-admin_actions">admin_actions</a></li>',
html=True,
)
def test_filters(self):
response = self.client.get(reverse("django-admindocs-filters"))
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(
response, '<li><a href="#built_in-add">add</a></li>', html=True
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class ValidXHTMLTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_lang_name_present(self):
with translation.override(None):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(ROOT_URLCONF="admin_views.urls", USE_THOUSAND_SEPARATOR=True)
class DateHierarchyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def assert_non_localized_year(self, response, year):
"""
The year is not localized with USE_THOUSAND_SEPARATOR (#15234).
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % date.year)
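# Ampersands in the rendered links are HTML-escaped, hence the &amp;
# entities below.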
def assert_contains_month_link(self, response, date):
self.assertContains(
response,
'?release_date__month=%d&amp;release_date__year=%d"'
% (date.month, date.year),
)
def assert_contains_day_link(self, response, date):
self.assertContains(
response,
"?release_date__day=%d&"
'release_date__month=%d&release_date__year=%d"'
% (date.day, date.month, date.year),
)
def test_empty(self):
"""
No date hierarchy links display with empty changelist.
"""
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertNotContains(response, "release_date__year=")
self.assertNotContains(response, "release_date__month=")
self.assertNotContains(response, "release_date__day=")
def test_single(self):
"""
Single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
day-level links appear for changelist within single month.
"""
DATES = (
datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
month-level links appear for changelist within single year.
"""
DATES = (
datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, "release_date__day=")
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
year-level links appear for year-spanning changelist.
"""
DATES = (
datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
# no day/month-level links
self.assertNotContains(response, "release_date__day=")
self.assertNotContains(response, "release_date__month=")
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = "%s?release_date__year=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = "%s?release_date__year=%d&release_date__month=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
date.month,
)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
def test_related_field(self):
questions_data = (
# (posted data, number of answers),
(datetime.date(2001, 1, 30), 0),
(datetime.date(2003, 3, 15), 1),
(datetime.date(2005, 5, 3), 2),
)
for date, answer_count in questions_data:
question = Question.objects.create(posted=date)
for i in range(answer_count):
question.answer_set.create()
response = self.client.get(reverse("admin:admin_views_answer_changelist"))
for date, answer_count in questions_data:
link = '?question__posted__year=%d"' % date.year
if answer_count > 0:
self.assertContains(response, link)
else:
self.assertNotContains(response, link)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
One can easily customize the way related objects are saved.
Refs #16115.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
"child_set-TOTAL_FORMS": "3",
"child_set-INITIAL_FORMS": "0",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-1-name": "Catherine",
}
self.client.post(reverse("admin:admin_views_parent_add"), post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name="Josh Stone")
paul = Child.objects.create(parent=parent, name="Paul")
catherine = Child.objects.create(parent=parent, name="Catherine")
post = {
"child_set-TOTAL_FORMS": "5",
"child_set-INITIAL_FORMS": "2",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-0-id": paul.id,
"child_set-1-name": "Catherine",
"child_set-1-id": catherine.id,
}
self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.id,)), post
)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name="Josh Rock")
Child.objects.create(parent=parent, name="Paul")
Child.objects.create(parent=parent, name="Catherine")
post = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": parent.id,
"form-0-name": "Josh Stone",
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_parent_changelist"), post)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def test_logout(self):
self.client.force_login(self.superuser)
response = self.client.post(reverse("admin:logout"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "registration/logged_out.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:logout"))
self.assertFalse(response.context["has_permission"])
self.assertNotContains(
response, "user-tools"
) # The user-tools div shouldn't be visible.
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.post(reverse("admin:logout"))
self.assertEqual(
response.status_code, 302
) # we should be redirected to the login page.
# follow the redirect and test results.
response = self.client.post(reverse("admin:logout"), follow=True)
self.assertContains(
response,
'<input type="hidden" name="next" value="%s">' % reverse("admin:index"),
)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:login"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def send_message(self, level):
"""
Send a POST that triggers the dummy message action and assert that a
message with the given level appears in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_%s" % level,
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="%s">Test %s</li>' % (level, level), html=True
)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message("debug")
def test_message_info(self):
self.send_message("info")
def test_message_success(self):
self.send_message("success")
def test_message_warning(self):
self.send_message("warning")
def test_message_error(self):
self.send_message("error")
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_extra_tags",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="extra_tag info">Test tags</li>', html=True
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
admin_site = site
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
def setUp(self):
self.client.force_login(self.superuser)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are equal despite the ordering
of their querystring. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
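# _changelist_filters is itself a urlencoded querystring; parse it too
# so its internal ordering doesn't matter either.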
for parsed_qs in [parsed_qs1, parsed_qs2]:
if "_changelist_filters" in parsed_qs:
changelist_filters = parsed_qs["_changelist_filters"]
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs["_changelist_filters"] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
change_user_url = reverse(
"admin:auth_user_change", args=(self.joepublicuser.pk,)
)
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D1%26is_superuser__exact%3D1".format(change_user_url),
)
# Ignore scheme and host.
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Ignore ordering of querystring.
self.assertURLEqual(
"{}?is_staff__exact=0&is_superuser__exact=0".format(
reverse("admin:auth_user_changelist")
),
"{}?is_superuser__exact=0&is_staff__exact=0".format(
reverse("admin:auth_user_changelist")
),
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_superuser__exact%3D0%26is_staff__exact%3D0".format(change_user_url),
)
def get_changelist_filters(self):
return {
"is_superuser__exact": 0,
"is_staff__exact": 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
return urlencode(
{"_changelist_filters": self.get_changelist_filters_querystring()}
)
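    # For the filters above this produces, with "=" and "&" percent-encoded:
    #   _changelist_filters=is_superuser__exact%3D0%26is_staff__exact%3D0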
def get_sample_user_id(self):
return self.joepublicuser.pk
def get_changelist_url(self):
return "%s?%s" % (
reverse("admin:auth_user_changelist", current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self, add_preserved_filters=True):
url = reverse("admin:auth_user_add", current_app=self.admin_site.name)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_change_url(self, user_id=None, add_preserved_filters=True):
if user_id is None:
user_id = self.get_sample_user_id()
url = reverse(
"admin:auth_user_change", args=(user_id,), current_app=self.admin_site.name
)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_history",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_delete",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">{}</a>'.format(self.joepublicuser.username),
response.content.decode(),
)
self.assertURLEqual(detail_link[1], self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>', response.content.decode()
)
self.assertURLEqual(history_link[1], self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>', response.content.decode()
)
self.assertURLEqual(delete_link[1], self.get_delete_url())
# Test redirect on "Save".
post_data = {
"username": "joepublic",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
}
post_data["_save"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_changelist_url())
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["_continue"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_change_url())
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["_addanother"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_change_view_without_preserved_filters(self):
response = self.client.get(self.get_change_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
post_data = {
"username": "dummy",
"password1": "test",
"password2": "test",
}
# Test redirect on "Save".
post_data["_save"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy").pk)
)
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["username"] = "dummy2"
post_data["_continue"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy2").pk)
)
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["username"] = "dummy3"
post_data["_addanother"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_add_view_without_preserved_filters(self):
response = self.client.get(self.get_add_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {"post": "yes"})
self.assertRedirects(response, self.get_changelist_url())
def test_url_prefix(self):
context = {
"preserved_filters": self.get_preserved_filters_querystring(),
"opts": User._meta,
}
prefixes = ("", "/prefix/", "/後台/")
for prefix in prefixes:
with self.subTest(prefix=prefix), override_script_prefix(prefix):
url = reverse(
"admin:auth_user_changelist", current_app=self.admin_site.name
)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
"""#11277 -Labels of hidden fields in admin were not hidden."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_all_fields_visible(self):
response = self.client.get(reverse("admin:admin_views_emptymodelvisible_add"))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, "first")
self.assert_field_visible(response, "second")
def test_all_fields_hidden(self):
response = self.client.get(reverse("admin:admin_views_emptymodelhidden_add"))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, "first")
self.assert_field_hidden(response, "second")
def test_mixin(self):
response = self.client.get(reverse("admin:admin_views_emptymodelmixin_add"))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, "first")
self.assert_field_visible(response, "second")
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="fieldBox field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(
response, '<div class="fieldBox field-%s hidden">' % field_name
)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_add_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data.
        Also, assertFormError() and assertFormsetError() are usable for admin
        forms and formsets.
"""
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2",
}
response = self.client.post(
reverse("admin:admin_views_parentwithdependentchildren_add"), post_data
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormError(response.context["adminform"], None, [])
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
self.assertFormsetError(
response.context["inline_admin_formset"], None, None, []
)
def test_change_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
pwdc = ParentWithDependentChildren.objects.create(
some_required_info=6, family_name="Test1"
)
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1",
}
response = self.client.post(
reverse(
"admin:admin_views_parentwithdependentchildren_change", args=(pwdc.id,)
),
post_data,
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
def test_check(self):
"The view_on_site value is either a boolean or a callable"
try:
admin = CityAdmin(City, AdminSite())
CityAdmin.view_on_site = True
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = False
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = []
self.assertEqual(
admin.check(),
[
Error(
"The value of 'view_on_site' must be a callable or a boolean "
"value.",
obj=CityAdmin,
id="admin.E025",
),
],
)
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, 1))
)
def test_true(self):
"The default behavior is followed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_worker_change", args=(self.w1.pk,))
)
self.assertContains(
response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name)
)
def test_missing_get_absolute_url(self):
"None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
def test_custom_admin_site(self):
model_admin = ModelAdmin(City, customadmin.site)
content_type_pk = ContentType.objects.get_for_model(City).pk
redirect_url = model_admin.get_view_on_site_url(self.c1)
self.assertEqual(
redirect_url,
reverse(
f"{customadmin.site.name}:view_on_site",
kwargs={
"content_type_id": content_type_pk,
"object_id": self.c1.pk,
},
),
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_state_change", args=(self.s1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_true(self):
"The 'View on site' button is displayed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.r1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
self.assertContains(
response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
The GetFormsetsArgumentCheckingAdmin ModelAdmin throws an exception
if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_explicitly_provided_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSiteFinalCatchAllPatternTests(TestCase):
"""
Verifies the behaviour of the admin catch-all view.
    * Anonymous/non-staff users are redirected to login for all URLs, whether
otherwise valid or not.
* APPEND_SLASH is applied for staff if needed.
* Otherwise Http404.
* Catch-all view disabled via AdminSite.final_catch_all_view.
"""
def test_unknown_url_redirects_login_if_not_authenticated(self):
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), unknown_url)
)
def test_unknown_url_404_if_authenticated(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_known_url_missing_slash_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")[:-1]
response = self.client.get(known_url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_non_admin_url_shares_url_prefix(self):
url = reverse("non_admin")[:-1]
response = self.client.get(url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
def test_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
    def test_unknown_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")[:-1]
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
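        # APPEND_SLASH issues a permanent redirect to the slashed URL; the
        # staff user lacks article permissions there, hence the 403 target.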
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1], SCRIPT_NAME="/prefix/")
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True, FORCE_SCRIPT_NAME="/prefix/")
def test_missing_slash_append_slash_true_force_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_non_staff_user(self):
user = User.objects.create_user(
username="user",
password="secret",
email="[email protected]",
is_staff=False,
)
self.client.force_login(user)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/test_admin/admin/login/?next=/test_admin/admin/admin_views/article",
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_single_model_no_append_slash(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin9:admin_views_actor_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Same tests above with final_catch_all_view=False.
def test_unknown_url_404_if_not_authenticated_without_final_catch_all_view(self):
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_unknown_url_404_if_authenticated_without_final_catch_all_view(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_auth_without_final_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin10:login"), known_url)
)
def test_known_url_missing_slash_redirects_with_slash_if_not_auth_no_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, fetch_redirect_response=False
)
def test_non_admin_url_shares_url_prefix_without_final_catch_all_view(self):
url = reverse("non_admin10")
response = self.client.get(url[:-1])
self.assertRedirects(response, url, status_code=301)
def test_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin10:login"), url))
def test_unknown_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")[:-1]
response = self.client.get(url)
# Matches test_admin/admin10/admin_views/article/<path:object_id>/
self.assertRedirects(
response, url + "/", status_code=301, fetch_redirect_response=False
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url_without_final_catch_all_view(
self,
):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Outside admin.
def test_non_admin_url_404_if_not_authenticated(self):
unknown_url = "/unknown/"
response = self.client.get(unknown_url)
# Does not redirect to the admin login.
self.assertEqual(response.status_code, 404)
from django.db import connection
from django.db.models import (
CharField,
F,
Func,
IntegerField,
OuterRef,
Q,
Subquery,
Value,
Window,
)
from django.db.models.fields.json import KeyTextTransform, KeyTransform
from django.db.models.functions import Cast, Concat, Substr
from django.test import skipUnlessDBFeature
from django.test.utils import Approximate, ignore_warnings
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from . import PostgreSQLTestCase
from .models import AggregateTestModel, HotelReservation, Room, StatTestModel
try:
from django.contrib.postgres.aggregates import (
ArrayAgg,
BitAnd,
BitOr,
BitXor,
BoolAnd,
BoolOr,
Corr,
CovarPop,
JSONBAgg,
RegrAvgX,
RegrAvgY,
RegrCount,
RegrIntercept,
RegrR2,
RegrSlope,
RegrSXX,
RegrSXY,
RegrSYY,
StatAggregate,
StringAgg,
)
from django.contrib.postgres.fields import ArrayField
except ImportError:
pass # psycopg2 is not installed
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.aggs = AggregateTestModel.objects.bulk_create(
[
AggregateTestModel(
boolean_field=True,
char_field="Foo1",
text_field="Text1",
integer_field=0,
),
AggregateTestModel(
boolean_field=False,
char_field="Foo2",
text_field="Text2",
integer_field=1,
json_field={"lang": "pl"},
),
AggregateTestModel(
boolean_field=False,
char_field="Foo4",
text_field="Text4",
integer_field=2,
json_field={"lang": "en"},
),
AggregateTestModel(
boolean_field=True,
char_field="Foo3",
text_field="Text3",
integer_field=0,
json_field={"breed": "collie"},
),
]
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_empty_result_set(self):
AggregateTestModel.objects.all().delete()
tests = [
(ArrayAgg("char_field"), []),
(ArrayAgg("integer_field"), []),
(ArrayAgg("boolean_field"), []),
(BitAnd("integer_field"), None),
(BitOr("integer_field"), None),
(BoolAnd("boolean_field"), None),
(BoolOr("boolean_field"), None),
(JSONBAgg("integer_field"), []),
(StringAgg("char_field", delimiter=";"), ""),
]
if connection.features.has_bit_xor:
tests.append((BitXor("integer_field"), None))
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = AggregateTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = AggregateTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_default_argument(self):
AggregateTestModel.objects.all().delete()
tests = [
(ArrayAgg("char_field", default=["<empty>"]), ["<empty>"]),
(ArrayAgg("integer_field", default=[0]), [0]),
(ArrayAgg("boolean_field", default=[False]), [False]),
(BitAnd("integer_field", default=0), 0),
(BitOr("integer_field", default=0), 0),
(BoolAnd("boolean_field", default=False), False),
(BoolOr("boolean_field", default=False), False),
(JSONBAgg("integer_field", default=Value('["<empty>"]')), ["<empty>"]),
(
StringAgg("char_field", delimiter=";", default=Value("<empty>")),
"<empty>",
),
]
if connection.features.has_bit_xor:
tests.append((BitXor("integer_field", default=0), 0))
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = AggregateTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = AggregateTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_convert_value_deprecation(self):
AggregateTestModel.objects.all().delete()
queryset = AggregateTestModel.objects.all()
with self.assertWarnsMessage(
RemovedInDjango50Warning, ArrayAgg.deprecation_msg
):
queryset.aggregate(aggregation=ArrayAgg("boolean_field"))
with self.assertWarnsMessage(
RemovedInDjango50Warning, JSONBAgg.deprecation_msg
):
queryset.aggregate(aggregation=JSONBAgg("integer_field"))
with self.assertWarnsMessage(
RemovedInDjango50Warning, StringAgg.deprecation_msg
):
queryset.aggregate(aggregation=StringAgg("char_field", delimiter=";"))
# No warnings raised if default argument provided.
self.assertEqual(
queryset.aggregate(aggregation=ArrayAgg("boolean_field", default=None)),
{"aggregation": None},
)
self.assertEqual(
queryset.aggregate(aggregation=JSONBAgg("integer_field", default=None)),
{"aggregation": None},
)
self.assertEqual(
queryset.aggregate(
aggregation=StringAgg("char_field", delimiter=";", default=None),
),
{"aggregation": None},
)
self.assertEqual(
queryset.aggregate(
aggregation=ArrayAgg("boolean_field", default=Value([]))
),
{"aggregation": []},
)
self.assertEqual(
queryset.aggregate(
aggregation=JSONBAgg("integer_field", default=Value("[]"))
),
{"aggregation": []},
)
self.assertEqual(
queryset.aggregate(
aggregation=StringAgg("char_field", delimiter=";", default=Value("")),
),
{"aggregation": ""},
)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg("char_field"))
self.assertEqual(values, {"arrayagg": ["Foo1", "Foo2", "Foo4", "Foo3"]})
def test_array_agg_charfield_ordering(self):
ordering_test_cases = (
(F("char_field").desc(), ["Foo4", "Foo3", "Foo2", "Foo1"]),
(F("char_field").asc(), ["Foo1", "Foo2", "Foo3", "Foo4"]),
(F("char_field"), ["Foo1", "Foo2", "Foo3", "Foo4"]),
(
[F("boolean_field"), F("char_field").desc()],
["Foo4", "Foo2", "Foo3", "Foo1"],
),
(
(F("boolean_field"), F("char_field").desc()),
["Foo4", "Foo2", "Foo3", "Foo1"],
),
("char_field", ["Foo1", "Foo2", "Foo3", "Foo4"]),
("-char_field", ["Foo4", "Foo3", "Foo2", "Foo1"]),
(Concat("char_field", Value("@")), ["Foo1", "Foo2", "Foo3", "Foo4"]),
(Concat("char_field", Value("@")).desc(), ["Foo4", "Foo3", "Foo2", "Foo1"]),
(
(
Substr("char_field", 1, 1),
F("integer_field"),
Substr("char_field", 4, 1).desc(),
),
["Foo3", "Foo1", "Foo2", "Foo4"],
),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("char_field", ordering=ordering)
)
self.assertEqual(values, {"arrayagg": expected_output})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("integer_field")
)
self.assertEqual(values, {"arrayagg": [0, 1, 2, 0]})
def test_array_agg_integerfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("integer_field", ordering=F("integer_field").desc())
)
self.assertEqual(values, {"arrayagg": [2, 1, 0, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("boolean_field")
)
self.assertEqual(values, {"arrayagg": [True, False, False, True]})
def test_array_agg_booleanfield_ordering(self):
ordering_test_cases = (
(F("boolean_field").asc(), [False, False, True, True]),
(F("boolean_field").desc(), [True, True, False, False]),
(F("boolean_field"), [False, False, True, True]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("boolean_field", ordering=ordering)
)
self.assertEqual(values, {"arrayagg": expected_output})
def test_array_agg_jsonfield(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg(
KeyTransform("lang", "json_field"),
filter=Q(json_field__lang__isnull=False),
),
)
self.assertEqual(values, {"arrayagg": ["pl", "en"]})
def test_array_agg_jsonfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg(
KeyTransform("lang", "json_field"),
filter=Q(json_field__lang__isnull=False),
ordering=KeyTransform("lang", "json_field"),
),
)
self.assertEqual(values, {"arrayagg": ["en", "pl"]})
def test_array_agg_filter(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("integer_field", filter=Q(integer_field__gt=0)),
)
self.assertEqual(values, {"arrayagg": [1, 2]})
def test_array_agg_lookups(self):
aggr1 = AggregateTestModel.objects.create()
aggr2 = AggregateTestModel.objects.create()
StatTestModel.objects.create(related_field=aggr1, int1=1, int2=0)
StatTestModel.objects.create(related_field=aggr1, int1=2, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=3, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=4, int2=0)
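        # ArrayField's __overlap lookup compiles to PostgreSQL's && operator:
        # match rows whose aggregated array shares at least one element with [2].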
qs = (
StatTestModel.objects.values("related_field")
.annotate(array=ArrayAgg("int1"))
.filter(array__overlap=[2])
.values_list("array", flat=True)
)
self.assertCountEqual(qs.get(), [1, 2])
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(integer_field__in=[0, 1]).aggregate(
bitand=BitAnd("integer_field")
)
self.assertEqual(values, {"bitand": 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(integer_field=1).aggregate(
bitand=BitAnd("integer_field")
)
self.assertEqual(values, {"bitand": 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(integer_field=0).aggregate(
bitand=BitAnd("integer_field")
)
self.assertEqual(values, {"bitand": 0})
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(integer_field__in=[0, 1]).aggregate(
bitor=BitOr("integer_field")
)
self.assertEqual(values, {"bitor": 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(integer_field=1).aggregate(
bitor=BitOr("integer_field")
)
self.assertEqual(values, {"bitor": 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(integer_field=0).aggregate(
bitor=BitOr("integer_field")
)
self.assertEqual(values, {"bitor": 0})
@skipUnlessDBFeature("has_bit_xor")
def test_bit_xor_general(self):
AggregateTestModel.objects.create(integer_field=3)
values = AggregateTestModel.objects.filter(
integer_field__in=[1, 3],
).aggregate(bitxor=BitXor("integer_field"))
self.assertEqual(values, {"bitxor": 2})
@skipUnlessDBFeature("has_bit_xor")
def test_bit_xor_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1,
).aggregate(bitxor=BitXor("integer_field"))
self.assertEqual(values, {"bitxor": 1})
@skipUnlessDBFeature("has_bit_xor")
def test_bit_xor_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0,
).aggregate(bitxor=BitXor("integer_field"))
self.assertEqual(values, {"bitxor": 0})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd("boolean_field"))
self.assertEqual(values, {"booland": False})
def test_bool_and_q_object(self):
values = AggregateTestModel.objects.aggregate(
booland=BoolAnd(Q(integer_field__gt=2)),
)
self.assertEqual(values, {"booland": False})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr("boolean_field"))
self.assertEqual(values, {"boolor": True})
def test_bool_or_q_object(self):
values = AggregateTestModel.objects.aggregate(
boolor=BoolOr(Q(integer_field__gt=2)),
)
self.assertEqual(values, {"boolor": False})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg("char_field"))
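    # The delimiter is rendered as an escaped SQL string literal, so even a
    # single-quote delimiter cannot break out of the STRING_AGG() call.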
def test_string_agg_delimiter_escaping(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("char_field", delimiter="'")
)
self.assertEqual(values, {"stringagg": "Foo1'Foo2'Foo4'Foo3"})
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("char_field", delimiter=";")
)
self.assertEqual(values, {"stringagg": "Foo1;Foo2;Foo4;Foo3"})
def test_string_agg_default_output_field(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("text_field", delimiter=";"),
)
self.assertEqual(values, {"stringagg": "Text1;Text2;Text4;Text3"})
def test_string_agg_charfield_ordering(self):
ordering_test_cases = (
(F("char_field").desc(), "Foo4;Foo3;Foo2;Foo1"),
(F("char_field").asc(), "Foo1;Foo2;Foo3;Foo4"),
(F("char_field"), "Foo1;Foo2;Foo3;Foo4"),
("char_field", "Foo1;Foo2;Foo3;Foo4"),
("-char_field", "Foo4;Foo3;Foo2;Foo1"),
(Concat("char_field", Value("@")), "Foo1;Foo2;Foo3;Foo4"),
(Concat("char_field", Value("@")).desc(), "Foo4;Foo3;Foo2;Foo1"),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("char_field", delimiter=";", ordering=ordering)
)
self.assertEqual(values, {"stringagg": expected_output})
def test_string_agg_jsonfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg(
KeyTextTransform("lang", "json_field"),
delimiter=";",
ordering=KeyTextTransform("lang", "json_field"),
output_field=CharField(),
),
)
self.assertEqual(values, {"stringagg": "en;pl"})
def test_string_agg_filter(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg(
"char_field",
delimiter=";",
filter=Q(char_field__endswith="3") | Q(char_field__endswith="1"),
)
)
self.assertEqual(values, {"stringagg": "Foo1;Foo3"})
def test_orderable_agg_alternative_fields(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("integer_field", ordering=F("char_field").asc())
)
self.assertEqual(values, {"arrayagg": [0, 1, 0, 2]})
def test_jsonb_agg(self):
values = AggregateTestModel.objects.aggregate(jsonbagg=JSONBAgg("char_field"))
self.assertEqual(values, {"jsonbagg": ["Foo1", "Foo2", "Foo4", "Foo3"]})
def test_jsonb_agg_charfield_ordering(self):
ordering_test_cases = (
(F("char_field").desc(), ["Foo4", "Foo3", "Foo2", "Foo1"]),
(F("char_field").asc(), ["Foo1", "Foo2", "Foo3", "Foo4"]),
(F("char_field"), ["Foo1", "Foo2", "Foo3", "Foo4"]),
("char_field", ["Foo1", "Foo2", "Foo3", "Foo4"]),
("-char_field", ["Foo4", "Foo3", "Foo2", "Foo1"]),
(Concat("char_field", Value("@")), ["Foo1", "Foo2", "Foo3", "Foo4"]),
(Concat("char_field", Value("@")).desc(), ["Foo4", "Foo3", "Foo2", "Foo1"]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg("char_field", ordering=ordering),
)
self.assertEqual(values, {"jsonbagg": expected_output})
def test_jsonb_agg_integerfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg("integer_field", ordering=F("integer_field").desc()),
)
self.assertEqual(values, {"jsonbagg": [2, 1, 0, 0]})
def test_jsonb_agg_booleanfield_ordering(self):
ordering_test_cases = (
(F("boolean_field").asc(), [False, False, True, True]),
(F("boolean_field").desc(), [True, True, False, False]),
(F("boolean_field"), [False, False, True, True]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg("boolean_field", ordering=ordering),
)
self.assertEqual(values, {"jsonbagg": expected_output})
def test_jsonb_agg_jsonfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg(
KeyTransform("lang", "json_field"),
filter=Q(json_field__lang__isnull=False),
ordering=KeyTransform("lang", "json_field"),
),
)
self.assertEqual(values, {"jsonbagg": ["en", "pl"]})
def test_jsonb_agg_key_index_transforms(self):
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 28),
]
HotelReservation.objects.create(
datespan=(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
requirements={"double_bed": True, "parking": True},
)
HotelReservation.objects.create(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
requirements={"double_bed": False, "sea_view": True, "parking": False},
)
HotelReservation.objects.create(
datespan=(datetimes[0].date(), datetimes[2].date()),
start=datetimes[0],
end=datetimes[2],
room=room101,
requirements={"sea_view": False},
)
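        # requirements__0__sea_view drills into the aggregated JSONB array:
        # element 0 of each room's reservation list, then its "sea_view" key.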
values = (
Room.objects.annotate(
requirements=JSONBAgg(
"hotelreservation__requirements",
ordering="-hotelreservation__start",
)
)
.filter(requirements__0__sea_view=True)
.values("number", "requirements")
)
self.assertSequenceEqual(
values,
[
{
"number": 102,
"requirements": [
{"double_bed": False, "sea_view": True, "parking": False},
{"double_bed": True, "parking": True},
],
},
],
)
def test_string_agg_array_agg_ordering_in_subquery(self):
stats = []
for i, agg in enumerate(AggregateTestModel.objects.order_by("char_field")):
stats.append(StatTestModel(related_field=agg, int1=i, int2=i + 1))
stats.append(StatTestModel(related_field=agg, int1=i + 1, int2=i))
StatTestModel.objects.bulk_create(stats)
for aggregate, expected_result in (
(
ArrayAgg("stattestmodel__int1", ordering="-stattestmodel__int2"),
[
("Foo1", [0, 1]),
("Foo2", [1, 2]),
("Foo3", [2, 3]),
("Foo4", [3, 4]),
],
),
(
StringAgg(
Cast("stattestmodel__int1", CharField()),
delimiter=";",
ordering="-stattestmodel__int2",
),
[("Foo1", "0;1"), ("Foo2", "1;2"), ("Foo3", "2;3"), ("Foo4", "3;4")],
),
):
with self.subTest(aggregate=aggregate.__class__.__name__):
subquery = (
AggregateTestModel.objects.filter(
pk=OuterRef("pk"),
)
.annotate(agg=aggregate)
.values("agg")
)
values = (
AggregateTestModel.objects.annotate(
agg=Subquery(subquery),
)
.order_by("char_field")
.values_list("char_field", "agg")
)
self.assertEqual(list(values), expected_result)
def test_string_agg_array_agg_filter_in_subquery(self):
StatTestModel.objects.bulk_create(
[
StatTestModel(related_field=self.aggs[0], int1=0, int2=5),
StatTestModel(related_field=self.aggs[0], int1=1, int2=4),
StatTestModel(related_field=self.aggs[0], int1=2, int2=3),
]
)
for aggregate, expected_result in (
(
ArrayAgg("stattestmodel__int1", filter=Q(stattestmodel__int2__gt=3)),
[("Foo1", [0, 1]), ("Foo2", None)],
),
(
StringAgg(
Cast("stattestmodel__int2", CharField()),
delimiter=";",
filter=Q(stattestmodel__int1__lt=2),
),
[("Foo1", "5;4"), ("Foo2", None)],
),
):
with self.subTest(aggregate=aggregate.__class__.__name__):
subquery = (
AggregateTestModel.objects.filter(
pk=OuterRef("pk"),
)
.annotate(agg=aggregate)
.values("agg")
)
values = (
AggregateTestModel.objects.annotate(
agg=Subquery(subquery),
)
.filter(
char_field__in=["Foo1", "Foo2"],
)
.order_by("char_field")
.values_list("char_field", "agg")
)
self.assertEqual(list(values), expected_result)
def test_string_agg_filter_in_subquery_with_exclude(self):
subquery = (
AggregateTestModel.objects.annotate(
stringagg=StringAgg(
"char_field",
delimiter=";",
filter=Q(char_field__endswith="1"),
)
)
.exclude(stringagg="")
.values("id")
)
self.assertSequenceEqual(
AggregateTestModel.objects.filter(id__in=Subquery(subquery)),
[self.aggs[0]],
)
def test_ordering_isnt_cleared_for_array_subquery(self):
inner_qs = AggregateTestModel.objects.order_by("-integer_field")
qs = AggregateTestModel.objects.annotate(
integers=Func(
Subquery(inner_qs.values("integer_field")),
function="ARRAY",
output_field=ArrayField(base_field=IntegerField()),
),
)
self.assertSequenceEqual(
qs.first().integers,
inner_qs.values_list("integer_field", flat=True),
)
def test_window(self):
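        # Window(expression=ArrayAgg(...), partition_by=...) renders roughly as
        # ARRAY_AGG("char_field") OVER (PARTITION BY "integer_field").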
self.assertCountEqual(
AggregateTestModel.objects.annotate(
integers=Window(
expression=ArrayAgg("char_field"),
partition_by=F("integer_field"),
)
).values("integers", "char_field"),
[
{"integers": ["Foo1", "Foo3"], "char_field": "Foo1"},
{"integers": ["Foo1", "Foo3"], "char_field": "Foo3"},
{"integers": ["Foo2"], "char_field": "Foo2"},
{"integers": ["Foo4"], "char_field": "Foo4"},
],
)
class TestAggregateDistinct(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(char_field="Foo")
AggregateTestModel.objects.create(char_field="Foo")
AggregateTestModel.objects.create(char_field="Bar")
def test_string_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("char_field", delimiter=" ", distinct=False)
)
self.assertEqual(values["stringagg"].count("Foo"), 2)
self.assertEqual(values["stringagg"].count("Bar"), 1)
def test_string_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg("char_field", delimiter=" ", distinct=True)
)
self.assertEqual(values["stringagg"].count("Foo"), 1)
self.assertEqual(values["stringagg"].count("Bar"), 1)
def test_array_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("char_field", distinct=False)
)
self.assertEqual(sorted(values["arrayagg"]), ["Bar", "Foo", "Foo"])
def test_array_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg("char_field", distinct=True)
)
self.assertEqual(sorted(values["arrayagg"]), ["Bar", "Foo"])
def test_jsonb_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg("char_field", distinct=False),
)
self.assertEqual(sorted(values["jsonbagg"]), ["Bar", "Foo", "Foo"])
def test_jsonb_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg("char_field", distinct=True),
)
self.assertEqual(sorted(values["jsonbagg"]), ["Bar", "Foo"])
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, "Both y and x must be provided."):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x="test", y=13)
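        # y is stored first; plain values (y=13) are wrapped in Value() while
        # strings (x="test") are resolved to F() column references.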
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = "TEST"
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
StatTestModel.objects.aggregate(SomeFunc(y="int2", x="int1"))
# Test aggregates
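    # The points (int1, int2) = (1, 3), (2, 2), (3, 1) lie exactly on the line
    # int2 = -int1 + 4, giving closed-form expectations in the tests below:
    # slope -1, intercept 4, r^2 = 1, correlation -1.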
def test_empty_result_set(self):
StatTestModel.objects.all().delete()
tests = [
(Corr(y="int2", x="int1"), None),
(CovarPop(y="int2", x="int1"), None),
(CovarPop(y="int2", x="int1", sample=True), None),
(RegrAvgX(y="int2", x="int1"), None),
(RegrAvgY(y="int2", x="int1"), None),
(RegrCount(y="int2", x="int1"), 0),
(RegrIntercept(y="int2", x="int1"), None),
(RegrR2(y="int2", x="int1"), None),
(RegrSlope(y="int2", x="int1"), None),
(RegrSXX(y="int2", x="int1"), None),
(RegrSXY(y="int2", x="int1"), None),
(RegrSYY(y="int2", x="int1"), None),
]
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = StatTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = StatTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_default_argument(self):
StatTestModel.objects.all().delete()
tests = [
(Corr(y="int2", x="int1", default=0), 0),
(CovarPop(y="int2", x="int1", default=0), 0),
(CovarPop(y="int2", x="int1", sample=True, default=0), 0),
(RegrAvgX(y="int2", x="int1", default=0), 0),
(RegrAvgY(y="int2", x="int1", default=0), 0),
# RegrCount() doesn't support the default argument.
(RegrIntercept(y="int2", x="int1", default=0), 0),
(RegrR2(y="int2", x="int1", default=0), 0),
(RegrSlope(y="int2", x="int1", default=0), 0),
(RegrSXX(y="int2", x="int1", default=0), 0),
(RegrSXY(y="int2", x="int1", default=0), 0),
(RegrSYY(y="int2", x="int1", default=0), 0),
]
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = StatTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = StatTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y="int2", x="int1"))
self.assertEqual(values, {"corr": -1.0})
def test_covar_pop_general(self):
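        # Population covariance here is ((-1)(1) + (0)(0) + (1)(-1)) / 3 = -2/3,
        # hence the Approximate comparison.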
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y="int2", x="int1"))
self.assertEqual(values, {"covarpop": Approximate(-0.66, places=1)})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(
covarpop=CovarPop(y="int2", x="int1", sample=True)
)
self.assertEqual(values, {"covarpop": -1.0})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y="int2", x="int1"))
self.assertEqual(values, {"regravgx": 2.0})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y="int2", x="int1"))
self.assertEqual(values, {"regravgy": 2.0})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(
regrcount=RegrCount(y="int2", x="int1")
)
self.assertEqual(values, {"regrcount": 3})
def test_regr_count_default(self):
msg = "RegrCount does not allow default."
with self.assertRaisesMessage(TypeError, msg):
RegrCount(y="int2", x="int1", default=0)
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(
regrintercept=RegrIntercept(y="int2", x="int1")
)
self.assertEqual(values, {"regrintercept": 4})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y="int2", x="int1"))
self.assertEqual(values, {"regrr2": 1})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(
regrslope=RegrSlope(y="int2", x="int1")
)
self.assertEqual(values, {"regrslope": -1})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y="int2", x="int1"))
self.assertEqual(values, {"regrsxx": 2.0})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y="int2", x="int1"))
self.assertEqual(values, {"regrsxy": -2.0})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y="int2", x="int1"))
self.assertEqual(values, {"regrsyy": 2.0})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(
complex_regravgx=RegrAvgX(y=5, x="related_field__integer_field")
)
self.assertEqual(values, {"complex_regravgx": 1.0})
import datetime
import pickle
import sys
import unittest
from operator import attrgetter
from threading import Lock
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, Exists, F, Max, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import AND, OR, NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
FK1,
Annotation,
Article,
Author,
BaseA,
BaseUser,
Book,
CategoryItem,
CategoryRelationship,
Celebrity,
Channel,
Chapter,
Child,
ChildObjectA,
Classroom,
CommonMixedCaseForeignKeys,
Company,
Cover,
CustomPk,
CustomPkTag,
DateTimePK,
Detail,
DumbCategory,
Eaten,
Employment,
ExtraInfo,
Fan,
Food,
Identifier,
Individual,
Item,
Job,
JobResponsibilities,
Join,
LeafA,
LeafB,
LoopX,
LoopZ,
ManagedModel,
Member,
MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem,
ModelA,
ModelB,
ModelC,
ModelD,
MyObject,
NamedCategory,
Node,
Note,
NullableName,
Number,
ObjectA,
ObjectB,
ObjectC,
OneToOneCategory,
Order,
OrderItem,
Page,
Paragraph,
Person,
Plaything,
PointerA,
Program,
ProxyCategory,
ProxyObjectA,
ProxyObjectB,
Ranking,
Related,
RelatedIndividual,
RelatedObject,
Report,
ReportComment,
ReservedName,
Responsibility,
School,
SharedConnection,
SimpleCategory,
SingleObject,
SpecialCategory,
Staff,
StaffUser,
Student,
Tag,
Task,
Teacher,
Ticket21203Child,
Ticket21203Parent,
Ticket23605A,
Ticket23605B,
Ticket23605C,
TvChef,
Valid,
X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
ann2.notes.add(cls.n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(
info="e2", note=cls.n2, value=41, filterable=False
)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(
name="one",
created=cls.time1,
modified=cls.time1,
creator=cls.a1,
note=cls.n3,
)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(
name="two", created=cls.time2, creator=cls.a2, note=cls.n2
)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(
name="three", created=time3, creator=cls.a2, note=cls.n3
)
cls.i4 = Item.objects.create(
name="four", created=time4, creator=cls.a4, note=cls.n3
)
cls.i4.tags.set([cls.t4])
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
cls.c1 = Cover.objects.create(title="first", item=cls.i4)
cls.c2 = Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"})
self.assertIn("v0", str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn("w0", str(qs4.query).lower())
# So, 'U0."id"' is referenced in SELECT and WHERE twice.
self.assertEqual(str(qs4.query).lower().count("u0."), 4)
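        # Illustrative only (exact aliases and quoting are backend-dependent):
        # the chain above compiles to nested IN subqueries along the lines of
        #   SELECT ... FROM queries_tag WHERE parent_id IN (
        #       SELECT U0.id FROM queries_tag U0 WHERE U0.parent_id IN (
        #           SELECT V0.id FROM queries_tag V0 WHERE V0.id <= 0))
        # with each nesting level claiming the next letter from T, U, V, ...,
        # which is what the subq_aliases assertions check.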
def test_ticket1050(self):
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=True),
[self.i3],
)
self.assertSequenceEqual(
Item.objects.filter(tags__id__isnull=True),
[self.i3],
)
def test_ticket1801(self):
self.assertSequenceEqual(
Author.objects.filter(item=self.i2),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i3),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
[self.a2],
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(
Q(creator__name="fred") | Q(tags=self.t2)
),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
)
self.assertSequenceEqual(
Item.objects.filter(
Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
),
[],
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
        self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
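        # A rough reading of the two counts above: conditions passed to a
        # single filter() call are applied through one join to the related
        # table, while a second filter() call over a multi-valued relation
        # opens a fresh join, hence the extra active table in the chained
        # variant.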
def test_ticket4464(self):
self.assertSequenceEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
[self.i2],
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
[self.i1, self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name")[:3],
[self.i1, self.i2],
)
def test_tickets_2080_3592(self):
self.assertSequenceEqual(
Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="one") | Q(name="a3")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(name="a3") | Q(item__name="one")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
[self.a2],
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [])
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values("creator").distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name="two")
.values("creator", "name")
.distinct()
.count(),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name", "foo")
.distinct()
.count()
),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name")
.distinct()
.count()
),
4,
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values("creator", "name").count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
self.assertSequenceEqual(q2, [self.i1])
self.assertSequenceEqual(
(q1 | q2).order_by("name"),
[self.i4, self.i1, self.i3, self.i2],
)
self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertSequenceEqual(
((q1 & q2) | q3).order_by("name"),
[self.i4, self.i1],
)
def test_order_by_tables(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by("name").query
self.assertEqual(
len(
[
t
for t in combined_query.alias_map
if combined_query.alias_refcount[t]
]
),
1,
)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by("greatest_fan__fan_of")
self.assertIn("OUTER JOIN", str(qs.query))
qs = qs.order_by("id")
self.assertNotIn("OUTER JOIN", str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by("name").get(pk=self.a1.pk)
self.assertNotIn("order by", captured_queries[0]["sql"].lower())
def test_tickets_4088_4306(self):
self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
self.assertSequenceEqual(
Report.objects.filter(creator__id=self.a1.id), [self.r1]
)
self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1])
def test_ticket4510(self):
self.assertSequenceEqual(
Author.objects.filter(report__name="r1"),
[self.a1],
)
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
def test_tickets_5324_6704(self):
self.assertSequenceEqual(
Item.objects.filter(tags__name="t4"),
[self.i4],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct(),
[self.i1, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(),
[self.i2, self.i3, self.i1],
)
self.assertSequenceEqual(
Author.objects.exclude(item__name="one").distinct().order_by("name"),
[self.a2, self.a3, self.a4],
)
        # Excluding across an m2m relation when there is more than one related
# object associated was problematic.
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").order_by("name"),
[self.i4, self.i3],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"),
[self.i3],
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
)
self.assertEqual(
len(
[
x
for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]
),
1,
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertSequenceEqual(
Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1]
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__isnull=True).order_by("name"),
[self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
def test_ticket2091(self):
t = Tag.objects.get(name="t4")
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = sys.getrecursionlimit() // 16
msg = "Maximum recursion depth exceeded: too many subqueries."
with self.assertRaisesMessage(RecursionError, msg):
for i in range(local_recursion_limit + 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases,
{
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"AA",
"AB",
"AC",
"AD",
"AE",
"AF",
"AG",
"AH",
"AI",
"AJ",
"AK",
"AL",
"AM",
"AN",
},
)
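        # The alias sequence behaves like spreadsheet column names: T..Z,
        # then AA, AB, ... A minimal sketch of that progression (illustrative
        # only, not Django's actual implementation):
        #
        #   import itertools
        #   import string
        #
        #   def alias_names():
        #       letters = string.ascii_uppercase
        #       yield from letters[letters.index("T"):]
        #       for a, b in itertools.product(letters, repeat=2):
        #           yield a + b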
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
msg = "Cannot combine queries on two different base models."
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4)
self.assertEqual(
Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4
)
def test_ticket2400(self):
self.assertSequenceEqual(
Author.objects.filter(item__isnull=True),
[self.a3],
)
self.assertSequenceEqual(
Tag.objects.filter(item__isnull=True),
[self.t5],
)
def test_ticket2496(self):
self.assertSequenceEqual(
Item.objects.extra(tables=["queries_author"])
.select_related()
.order_by("name")[:1],
[self.i4],
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"):
Note.objects.filter({"note": "n1", "misc": "foo"})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertSequenceEqual(
Item.objects.order_by("note__note", "name"),
[self.i2, self.i4, self.i1, self.i3],
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertSequenceEqual(
Author.objects.order_by("extra", "-name"),
[self.a2, self.a1, self.a4, self.a3],
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertSequenceEqual(
Item.objects.order_by("creator", "name"),
[self.i1, self.i3, self.i2, self.i4],
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=False).order_by("tags", "id"),
[self.i1, self.i2, self.i1, self.i2, self.i4],
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by("name")
self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by("note__note", "name")
self.assertQuerysetEqual(qs, [self.i2, self.i4, self.i1, self.i3])
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), "<Note: n2>")
self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>")
def test_ticket3037(self):
self.assertSequenceEqual(
Item.objects.filter(
Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four")
),
[self.i4],
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values("misc").distinct().order_by("note", "-misc"),
[{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}],
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn("note_id", ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(
ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}]
)
# ...or use the field name.
self.assertSequenceEqual(
ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}]
)
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertSequenceEqual(
Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
),
[self.a1],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
).filter(id=self.a1.id),
[self.a1],
)
def test_ticket6981(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_ticket9926(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent", "category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.select_related("parent", "parent__category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes("created", "month").count(), 1)
self.assertEqual(Item.objects.datetimes("created", "day").count(), 2)
self.assertEqual(len(Item.objects.datetimes("created", "day")), 2)
self.assertEqual(
Item.objects.datetimes("created", "day")[0],
datetime.datetime(2007, 12, 19, 0, 0),
)
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(select={"a": 1}),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
self.assertSequenceEqual(
Item.objects.extra(select={"a": 1}).datetimes("created", "day"),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
name = "one"
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(
where=["name=%s"], params=[name]
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
self.assertSequenceEqual(
Item.objects.extra(where=["name=%s"], params=[name]).datetimes(
"created", "day"
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_ticket7155(self):
# Nullable dates
self.assertSequenceEqual(
Item.objects.datetimes("modified", "day"),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_order_by_rawsql(self):
self.assertSequenceEqual(
Item.objects.values("note__note").order_by(
RawSQL("queries_note.note", ()),
"id",
),
[
{"note__note": "n2"},
{"note__note": "n3"},
{"note__note": "n3"},
{"note__note": "n3"},
],
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertSequenceEqual(
Tag.objects.filter(parent=self.t1, name="t3").order_by("name"),
[self.t3],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"),
[self.t1, self.t2, self.t4, self.t5],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1", name="one")
.order_by("name")
.distinct(),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(name__in=["three", "four"])
.exclude(tags__name="t1")
.order_by("name"),
[self.i4, self.i3],
)
# More twisted cases, involving nested negations.
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one")),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"),
[self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"),
[self.i4, self.i1, self.i3],
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer("name", "creator")
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertSequenceEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5)
| Q(tag__children=self.t5)
| Q(tag__children__children=self.t5)
),
[self.ann1],
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertSequenceEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
[self.i1, self.i2],
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal="m")
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal="m"), [])
self.assertQuerysetEqual(q.exclude(meal="m"), [])
self.assertQuerysetEqual(q.complex_filter({"pk": 1}), [])
self.assertQuerysetEqual(q.select_related("food"), [])
self.assertQuerysetEqual(q.annotate(Count("food")), [])
self.assertQuerysetEqual(q.order_by("meal", "food"), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(q.extra(select={"foo": "1"}), [])
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
q.extra(select={"foo": "1"})
self.assertQuerysetEqual(q.defer("meal"), [])
self.assertQuerysetEqual(q.only("meal"), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes("created", "month")
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertSequenceEqual(
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name")
),
[self.t2, self.t3],
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name", "id")
)
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values_list("name", "id")
)
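        # In other words, only single-field values()/values_list() querysets
        # can stand in for the right-hand side of __in; Django refuses
        # multi-field ones rather than guessing which column to select.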
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{"id": 1}, {"id": 2}, {"id": 3}],
)
self.assertSequenceEqual(
Annotation.objects.filter(
notes__in=Note.objects.filter(note="n1")
.values_list("note")
.values("id")
),
[self.ann1],
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue
# properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
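        # Note that each generator is consumed exactly once while the "__in"
        # clause is built, so reusing f() or g() requires calling them again
        # for a fresh iterator.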
def test_ticket10742(self):
# Queries used in an __in clause don't execute subqueries
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertSequenceEqual(qs, [self.a1, self.a2])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertSequenceEqual(qs, [self.a3, self.a4])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertSequenceEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name="a1")),
[self.a1],
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
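        # Illustrative shape of the first query above (backend-dependent):
        #   SELECT ... FROM queries_author
        #   WHERE id IN (SELECT U0.id FROM queries_author U0
        #                WHERE U0.num < 3000)
        # The inner queryset is inlined as SQL rather than evaluated, which
        # is why subq._result_cache stays None in all three checks.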
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertSequenceEqual(
Item.objects.exclude(modified=self.time1).order_by("name"),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__name=self.t1.name),
[self.t1, self.t4, self.t5],
)
def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database-dependent;
        # instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by("parent__name")), 5)
# Empty querysets can be merged with others.
self.assertSequenceEqual(
Note.objects.none() | Note.objects.all(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.all() | Note.objects.none(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertSequenceEqual(
Author.objects.filter(
Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz")
),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2)
),
[self.a2],
)
self.assertSequenceEqual(
Annotation.objects.filter(
Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1")
),
[self.ann1],
)
xx = ExtraInfo.objects.create(info="xx", note=self.n3)
self.assertSequenceEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
[self.n1, self.n3],
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len(
[
x
for x in q.alias_map.values()
if x.join_type == LOUTER and q.alias_refcount[x.table_alias]
]
),
1,
)
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertCountEqual(
Tag.objects.all(),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name="t4"),
Item.objects.filter(~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))),
)
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~Q(~Q(tags__name="t4"))),
)
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~Q(tags__name__in=["t4", "t3"])),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])),
)
def test_ticket_10790_1(self):
        # Querying direct fields with isnull should trim the left outer join.
        # It also should not create an INNER JOIN.
q = Tag.objects.filter(parent__isnull=True)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
        # Querying without isnull should not convert anything to a left
        # outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertSequenceEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertSequenceEqual(q, [self.nc1])
def test_ticket_10790_4(self):
        # Querying across an m2m field should not strip the m2m table from
        # the join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertSequenceEqual(q, [self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2)
self.assertNotIn("INNER JOIN", str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_5(self):
        # Querying with isnull=False across an m2m field should not create
        # outer joins.
q = Author.objects.filter(item__tags__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 4)
def test_ticket_10790_6(self):
        # Querying with isnull=True across an m2m field should not create
        # inner joins and should strip the last outer join.
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertSequenceEqual(
q,
[self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertSequenceEqual(q, [self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q1 & q2
self.assertSequenceEqual(q3, [])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
def test_ticket19672(self):
self.assertSequenceEqual(
Report.objects.filter(
Q(creator__isnull=False) & ~Q(creator__extra__value=41)
),
[self.r1],
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count("item"))
qs = qs.filter(~Q(extra__value=0)).order_by("name")
self.assertIn("SELECT", str(qs.query))
self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name="generic")
def test_common_mixed_case_foreign_keys(self):
"""
        A valid query should be generated when fields fetched from joined
        tables include FKs whose names differ only by case.
"""
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
c3 = SimpleCategory.objects.create(name="c3")
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
CaTeGoRy=c2
)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
category=c3
)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
"category",
"mixed_case_field_category",
"mixed_case_db_column_category",
"category__category",
"mixed_case_field_category__CaTeGoRy",
"mixed_case_db_column_category__category",
)
self.assertTrue(qs.first())
def test_excluded_intermediary_m2m_table_joined(self):
self.assertSequenceEqual(
Note.objects.filter(~Q(tag__annotation__name=F("note"))),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.filter(tag__annotation__name="a1").filter(
~Q(tag__annotation__name=F("note"))
),
[],
)
def test_field_with_filterable(self):
self.assertSequenceEqual(
Author.objects.filter(extra=self.e2),
[self.a3, self.a4],
)
def test_negate_field(self):
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.num4 = Number.objects.create(num=4)
cls.num8 = Number.objects.create(num=8)
cls.num12 = Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertSequenceEqual(Number.objects.filter(num__lt=4), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertSequenceEqual(
Number.objects.filter(num__gt=8, num__lt=13),
[self.num12],
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
[self.num8],
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertSequenceEqual(
Number.objects.filter(num__gt=11.9),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gt=12), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), [])
self.assertCountEqual(
Number.objects.filter(num__lt=12),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.0),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=11.9),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12.0),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), [])
self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), [])
self.assertCountEqual(
Number.objects.filter(num__lte=11.9),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.0),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.9),
[self.num4, self.num8, self.num12],
)
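        # Roughly, the custom lookups round the float bound inward for
        # IntegerField comparisons: num__gt=11.9 behaves like num__gte=12,
        # and num__lt=12.1 like num__lte=12, which is what the boundary
        # cases above exercise.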
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
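        # qs.count() issues its own SELECT COUNT(*) query, so it can run
        # even while the result set of qs is only partially iterated; the
        # partial read does not disturb the count.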
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_datetimes_invalid_field(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
msg = "'name' isn't a DateField, TimeField, or DateTimeField."
with self.assertRaisesMessage(TypeError, msg):
Item.objects.datetimes("name", "month")
def test_ticket22023(self):
with self.assertRaisesMessage(
TypeError, "Cannot call only() after .values() or .values_list()"
):
Valid.objects.values().only()
with self.assertRaisesMessage(
TypeError, "Cannot call defer() after .values() or .values_list()"
):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
n1 = Note.objects.create(note="n1", misc="foo")
n2 = Note.objects.create(note="n2", misc="bar")
e1 = ExtraInfo.objects.create(info="e1", note=n1)
e2 = ExtraInfo.objects.create(info="e2", note=n2)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
cls.i1 = Item.objects.create(
name="i1", created=datetime.datetime.now(), note=n1, creator=cls.a1
)
cls.i2 = Item.objects.create(
name="i2", created=datetime.datetime.now(), note=n1, creator=cls.a3
)
def test_ticket24525(self):
tag = Tag.objects.create()
anth100 = tag.note_set.create(note="ANTH", misc="100")
math101 = tag.note_set.create(note="MATH", misc="101")
s1 = tag.annotation_set.create(name="1")
s2 = tag.annotation_set.create(name="2")
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(
notes__in=[anth100]
)
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = (
"Unsaved model instance <NamedCategory: Other> cannot be used in an ORM "
"query."
)
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
# Note: when combining the query we need to have information available
# about the join type of the trimmed "creator__isnull" join. If we
# don't have that information, then the join is created as INNER JOIN
# and results will be incorrect.
q1 = Report.objects.filter(
Q(creator__isnull=True) | Q(creator__extra__info="e1")
)
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(
Q(creator__extra__info="e1")
)
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(
Q(creator__extra__info="e1") | Q(creator__isnull=True)
)
q2 = Report.objects.filter(
Q(creator__extra__info="e1")
) | Report.objects.filter(Q(creator__isnull=True))
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator=self.a1) | Q(creator__report__name="r1")
).order_by()
q2 = (
Item.objects.filter(Q(creator=self.a1)).order_by()
| Item.objects.filter(Q(creator__report__name="r1")).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator__report__name="e1") | Q(creator=self.a1)
).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name="e1")).order_by()
| Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
# Joins having identical connections are correctly recreated in the
# rhs query, in case the query is ORed together (#18748).
Report.objects.create(name="r4", creator=self.a1)
q1 = Author.objects.filter(report__name="r5")
q2 = Author.objects.filter(report__name="r4").filter(report__name="r1")
combined = q1 | q2
self.assertEqual(str(combined.query).count("JOIN"), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, "a1")
def test_combine_or_filter_reuse(self):
combined = Author.objects.filter(name="a1") | Author.objects.filter(name="a3")
self.assertEqual(combined.get(name="a1"), self.a1)
def test_join_reuse_order(self):
        # Join aliases are reused in order. This shouldn't raise an
        # AssertionError about change_map containing a circular reference
        # (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data="mm1", tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data="mm"), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ""
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by(
"name"
),
["e1", "e2", expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertSequenceEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
[self.r1, self.r2, self.r3],
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, "d2")
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by("name")
self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by the reverse of a foreign key, although
        # that can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(
SimpleCategory.objects.order_by("categoryitem", "pk"), [c1, c2, c1]
)
def test_filter_reverse_non_integer_pk(self):
date_obj = DateTimePK.objects.create()
extra_obj = ExtraInfo.objects.create(info="extra", date=date_obj)
self.assertEqual(
DateTimePK.objects.filter(extrainfo=extra_obj).get(),
date_obj,
)
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), []
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(
category__onetoonecategory__isnull=False
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(
category__onetoonecategory__isnull=True
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
# Meta.ordering will be rank3, rank2, rank1.
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
e2 = ExtraInfo.objects.create(info="e2", note=cls.n2)
a1 = Author.objects.create(name="a1", num=1001, extra=e1)
a2 = Author.objects.create(name="a2", num=2002, extra=e1)
a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.rank2 = Ranking.objects.create(rank=2, author=a2)
cls.rank1 = Ranking.objects.create(rank=1, author=a3)
cls.rank3 = Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
self.assertSequenceEqual(
Ranking.objects.order_by("rank"),
[self.rank1, self.rank2, self.rank3],
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertSequenceEqual(
Ranking.objects.extra(
tables=["django_site"], order_by=["-django_site.id", "rank"]
),
[self.rank1, self.rank2, self.rank3],
)
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
self.assertEqual(
[o.good for o in qs.extra(order_by=("-good",))], [True, False, False]
)
self.assertSequenceEqual(
qs.extra(order_by=("-good", "id")),
[self.rank3, self.rank2, self.rank1],
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values("id", "rank").order_by("id")
self.assertEqual([d["rank"] for d in dicts], [2, 1, 3])
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
dicts = qs.values().order_by("id")
for d in dicts:
del d["id"]
del d["author_id"]
self.assertEqual(
[sorted(d.items()) for d in dicts],
[
[("good", 0), ("rank", 2)],
[("good", 0), ("rank", 1)],
[("good", 1), ("rank", 3)],
],
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=["django_site"])
qs.query.get_compiler(qs.db).as_sql()
        # The test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name="a1")
ranking_start = Ranking.objects.get(author__name="a1")
# Make sure that the IDs from different tables don't happen to match.
self.assertSequenceEqual(
Ranking.objects.filter(author__name="a1"),
[self.rank3],
)
self.assertEqual(Ranking.objects.filter(author__name="a1").update(rank=4636), 1)
r = Ranking.objects.get(author__name="a1")
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertSequenceEqual(
Note.objects.exclude(Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q() | ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() & ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() ^ ~Q()),
[self.n1, self.n2],
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape select clauses
self.assertEqual(Note.objects.extra(select={"foo": "'%%s'"})[0].foo, "%s")
self.assertEqual(
Note.objects.extra(select={"foo": "'%%s bar %%s'"})[0].foo, "%s bar %s"
)
self.assertEqual(
Note.objects.extra(select={"foo": "'bar %%s'"})[0].foo, "bar %s"
)
def test_extra_select_alias_sql_injection(self):
crafted_alias = """injected_name" from "queries_note"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Note.objects.extra(select={crafted_alias: "1"})
def test_queryset_reuse(self):
# Using querysets doesn't mutate aliases.
authors = Author.objects.filter(Q(name="a1") | Q(name="nonexistent"))
self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3)
self.assertEqual(authors.count(), 1)
def test_filter_unsaved_object(self):
        # These tests will start raising ValueError in Django 5.0, when
        # passing unsaved model instances to related filters becomes
        # forbidden.
# msg = "Model instances passed to related filters must be saved."
msg = "Passing unsaved model instances to related filters is deprecated."
company = Company.objects.create(name="Django")
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.filter(staff=Staff(name="unsaved"))
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by("custom"), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
p1 = Plaything.objects.create(name="p1")
self.assertSequenceEqual(Plaything.objects.all(), [p1])
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name="s")
r = RelatedObject.objects.create(single=s, f=1)
p2 = Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
self.assertNotIn("JOIN", str(qs.query))
qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
self.assertIn("INNER", str(qs.query))
qs = qs.order_by("others__single__name")
        # The ordering by others__single__name will add one new join (to
        # single), and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
self.assertEqual(str(qs.query).count("LEFT"), 1)
self.assertEqual(str(qs.query).count("INNER"), 1)
self.assertSequenceEqual(qs, [p2])
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
l1 = LeafA.objects.create(data="first")
self.assertSequenceEqual(LeafA.objects.all(), [l1])
self.assertSequenceEqual(
LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
[l1],
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(note=self.n1)
| ExtraInfo.objects.filter(info="e2")
).filter(note=self.n1),
[self.e1],
)
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(info="e2")
| ExtraInfo.objects.filter(note=self.n1)
).filter(note=self.n1),
[self.e1],
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(n1)
cls.ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
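        # Both iterators share the queryset's result cache, so the rows are
        # fetched from the database only once and advancing one iterator
        # doesn't disturb the other's position.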
self.assertEqual(repr(next(i1)), "<Tag: t1>")
self.assertEqual(repr(next(i1)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t1>")
self.assertEqual(repr(next(i2)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t3>")
self.assertEqual(repr(next(i1)), "<Tag: t3>")
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two "SELECT" calls).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertSequenceEqual(
PointerA.objects.filter(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
PointerA.objects.exclude(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
Tag.objects.exclude(children=None),
[self.t1, self.t3],
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertSequenceEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
[self.t1, self.t4, self.t5],
)
        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in the
        # middle and then optimize the inner query without losing results.
self.assertSequenceEqual(
Annotation.objects.exclude(tag__children__name="t2"),
[self.ann2],
)
        # Nested queries are possible (although they should be used with
        # care, since they have performance problems on backends like MySQL).
self.assertSequenceEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
[self.ann1],
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by("name")
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
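        # Both lookups target the single-valued category relation, so the two
        # excludes should share one INNER JOIN rather than adding a second.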
qs = Tag.objects.exclude(category=None).exclude(category__name="foo")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
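        # count() over a distinct, ordered, sliced queryset must wrap it in a
        # subquery so the slice is applied before aggregation.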
self.assertEqual(
Tag.objects.distinct().order_by("category__name")[:3].count(), 3
)
def test_multiple_columns_with_the_same_name_slice(self):
self.assertEqual(
list(
Tag.objects.order_by("name").values_list("name", "category__name")[:2]
),
[("t1", "Generic"), ("t2", "Generic")],
)
self.assertSequenceEqual(
Tag.objects.order_by("name").select_related("category")[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by("-name").values_list("name", "parent__name")[:2]),
[("t5", "t3"), ("t4", "t3")],
)
self.assertSequenceEqual(
Tag.objects.order_by("-name").select_related("parent")[:2],
[self.t5, self.t4],
)
def test_col_alias_quoted(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertEqual(
Tag.objects.values("parent")
.annotate(
tag_per_parent=Count("pk"),
)
.aggregate(Max("tag_per_parent")),
{"tag_per_parent__max": 2},
)
sql = captured_queries[0]["sql"]
self.assertIn("AS %s" % connection.ops.quote_name("col1"), sql)
def test_xor_subquery(self):
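        # Only t2 matches exactly one of the two Exists() conditions; t3
        # matches both and is therefore excluded by XOR.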
self.assertSequenceEqual(
Tag.objects.filter(
Exists(Tag.objects.filter(id=OuterRef("id"), name="t3"))
^ Exists(Tag.objects.filter(id=OuterRef("id"), parent=self.t1))
),
[self.t2],
)
class RawQueriesTests(TestCase):
@classmethod
def setUpTestData(cls):
Note.objects.create(note="n1", misc="foo", id=1)
def test_ticket14729(self):
        # Test the representation of raw queries with one or more parameters
        # passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ["n1"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"
)
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ["n1", "foo"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs),
"<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>",
)
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty iterator as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
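        # The names embed the LIKE wildcards "_" and "%"; the lookups below
        # must escape them instead of treating them as patterns.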
item_ab = Item.objects.create(
name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
item_xy = Item.objects.create(
name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="x%Y"),
[item_xy],
)
self.assertSequenceEqual(
Item.objects.filter(name__istartswith="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iendswith="A_b"),
[item_ab],
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # OK - so the exists query worked - but did it include too many
        # columns?
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"]
id, name = connection.ops.quote_name("id"), connection.ops.quote_name("name")
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_distinct_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct().exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(connection.ops.quote_name("id"), captured_sql)
self.assertNotIn(connection.ops.quote_name("name"), captured_sql)
def test_sliced_distinct_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct()[1:3].exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertIn(connection.ops.quote_name("id"), captured_sql)
self.assertIn(connection.ops.quote_name("name"), captured_sql)
def test_ticket_18414(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_18414_distinct_on(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct("name").exists())
self.assertTrue(Article.objects.distinct("name")[1:2].exists())
self.assertFalse(Article.objects.distinct("name")[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
    Tests for the QuerySet.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.order_by("id").ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count("notes"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("num_notes").ordered, True)
def test_annotated_default_ordering(self):
qs = Tag.objects.annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
def test_annotated_values_default_ordering(self):
qs = Tag.objects.values("name").annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name="first")
NamedCategory.objects.create(id=2, name="second")
NamedCategory.objects.create(id=3, name="third")
NamedCategory.objects.create(id=4, name="fourth")
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)
self.assertEqual(set(query.values_list("id", flat=True)), {1, 2})
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:3]
)[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related objects constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
t2 = Tag.objects.create(name="t2", category=generic)
ManagedModel.objects.create(data="mm1", tag=t1, public=True)
mm2 = ManagedModel.objects.create(data="mm2", tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by("-id")[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:1]
).delete()
self.assertEqual(
set(DumbCategory.objects.values_list("id", flat=True)), {1, 2, 3}
)
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {1, 3})
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by("name")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["first", "fourth"],
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct()
.order_by("-name")
.values("id")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["second", "third"],
)
# Annotated value.
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(double_id=F("id") * 2)
.order_by("id")
.distinct()
.values("double_id")[0:2],
)
.order_by("id")
.values_list("id", flat=True),
[2, 4],
)
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.school = School.objects.create()
cls.room_1 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 1"
)
cls.room_2 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 2"
)
cls.room_3 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 3"
)
cls.room_4 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 4"
)
tag = Tag.objects.create()
cls.annotation_1 = Annotation.objects.create(tag=tag)
annotation_2 = Annotation.objects.create(tag=tag)
note = cls.annotation_1.notes.create(tag=tag)
cls.base_user_1 = BaseUser.objects.create(annotation=cls.annotation_1)
cls.base_user_2 = BaseUser.objects.create(annotation=annotation_2)
cls.task = Task.objects.create(
owner=cls.base_user_2,
creator=cls.base_user_2,
note=note,
)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
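        # Combining with a sliced queryset relies on pushing the slice into
        # an IN (...) subquery, hence the feature guard on these tests.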
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_3, self.room_4])
def test_subquery_aliases(self):
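        # Combining two querysets with "&" relabels table aliases; the
        # Exists() subquery's OuterRef must keep resolving to the outer
        # School query after the combine.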
combined = School.objects.filter(pk__isnull=False) & School.objects.filter(
Exists(
Classroom.objects.filter(
has_blackboard=True,
school=OuterRef("pk"),
)
),
)
self.assertSequenceEqual(combined, [self.school])
nested_combined = School.objects.filter(pk__in=combined.values("pk"))
self.assertSequenceEqual(nested_combined, [self.school])
def test_conflicting_aliases_during_combine(self):
qs1 = self.annotation_1.baseuser_set.all()
qs2 = BaseUser.objects.filter(
Q(owner__note__in=self.annotation_1.notes.all())
| Q(creator__note__in=self.annotation_1.notes.all())
)
self.assertSequenceEqual(qs1, [self.base_user_1])
self.assertSequenceEqual(qs2, [self.base_user_2])
self.assertCountEqual(qs2 | qs1, qs1 | qs2)
self.assertCountEqual(qs2 | qs1, [self.base_user_1, self.base_user_2])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"""
If a queryset is already evaluated, it can still be used as a query arg.
"""
n = Note(note="Test1", misc="misc")
n.save()
e = ExtraInfo(info="good", note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Make one of the cached results unpicklable.
n_list._result_cache[0].lock = Lock()
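        # Lock instances can't be pickled (or deep-copied), so any attempt to
        # copy the cached rows while reusing n_list below would blow up.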
with self.assertRaises(TypeError):
pickle.dumps(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good")
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model options shouldn't be cloned."
)
try:
Note.objects.filter(pk__lte=F("pk") + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model fields shouldn't be cloned"
)
try:
Note.objects.filter(note=F("misc")).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values("num").order_by("num"), [])
def test_values_subquery(self):
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")), []
)
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), []
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(1, 2),
)
qs = qs.order_by("value_minus_x")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"}
)
qs = qs.order_by("value_minus_one").order_by("value_plus_one")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_multiple(self):
# Postgres doesn't allow constants in order by, so check for that.
qs = Number.objects.extra(
select={
"value_plus_one": "num+1",
"value_minus_one": "num-1",
"constant_value": "1",
}
)
qs = qs.order_by("value_plus_one", "value_minus_one", "constant_value")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={"value_plus_one": "num+1", "value_minus_one": "num-1"},
            order_by=["value_minus_one"],
        )
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_select_params_values_order_in_extra(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s"},
select_params=[1],
order_by=["value_plus_x"],
)
qs = qs.filter(num=72)
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_multiple_select_params_values_order_by(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(72, 72),
)
qs = qs.order_by("value_minus_x")
qs = qs.filter(num=1)
qs = qs.values("num")
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num")
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = (
"Cannot resolve keyword %r into field. Join on 'name' not permitted."
% "foo"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list("name__foo")
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list("num", flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={"1": "num+1"}).values_list(
"1", named=True
).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list("num", "num2", named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(values._fields, ("num", "num2"))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(
values._fields,
("num2", "id", "num", "other_num", "another_num", "id__count"),
)
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count("id")
values = (
Number.objects.annotate(id__count1=expr)
.values_list(expr, "id__count1", named=True)
.first()
)
self.assertEqual(values._fields, ("id__count2", "id__count1"))
def test_named_values_list_expression(self):
expr = F("num") + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(
expr, "combinedexpression1", named=True
)
values = qs.first()
self.assertEqual(values._fields, ("combinedexpression2", "combinedexpression1"))
def test_named_values_pickle(self):
value = Number.objects.values_list("num", "other_num", named=True).get()
self.assertEqual(value, (72, None))
self.assertEqual(pickle.loads(pickle.dumps(value)), value)
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
cls.articles = [
Article.objects.create(name=f"Article {i}", created=some_date)
for i in range(1, 8)
]
def get_ordered_articles(self):
return Article.objects.order_by("name")
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, "Article 1")
self.assertSequenceEqual(
self.get_ordered_articles()[1:3],
[self.articles[1], self.articles[2]],
)
def test_slicing_with_steps_can_be_used(self):
self.assertSequenceEqual(
self.get_ordered_articles()[::2],
[
self.articles[0],
self.articles[2],
self.articles[4],
self.articles[6],
],
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_steps_is_not_lazy(self):
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][0:2],
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][4:], [self.articles[4]]
)
self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertSequenceEqual(
self.get_ordered_articles()[2:][0:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][2:3], [self.articles[4]]
)
# Using an offset without a limit is also possible.
self.assertSequenceEqual(
self.get_ordered_articles()[5:],
[self.articles[5], self.articles[6]],
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
msg = "Cannot filter a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
msg = "Cannot reorder a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].order_by("id")
def test_slicing_cannot_combine_queries_once_sliced(self):
msg = "Cannot combine queries once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[0:-5]
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1:]
def test_invalid_index(self):
msg = "QuerySet indices must be integers or slices, not str."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()["foo"]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact="Article 1")), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact="Article 1")
s2 = Article.objects.filter(name__exact="Article 2")
self.assertSequenceEqual(
(s1 | s2).order_by("name"),
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
Article.objects.create(name="three", created=datetime.datetime.now())
Article.objects.create(name="four", created=datetime.datetime.now())
food = Food.objects.create(name="spam")
Eaten.objects.create(meal="spam with eggs", food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[:0].latest("created")
def test_empty_resultset_sql(self):
# ticket #12192
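        # A slice that is provably empty short-circuits without ever hitting
        # the database.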
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(
Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0
)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(
Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1
)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
r_a = ReservedName.objects.create(name="a", order=42)
r_b = ReservedName.objects.create(name="b", order=37)
self.assertSequenceEqual(
ReservedName.objects.order_by("order"),
[r_b, r_a],
)
self.assertSequenceEqual(
ReservedName.objects.extra(
select={"stuff": "name"}, order_by=("order", "stuff")
),
[r_b, r_a],
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
{lunch},
)
self.assertEqual(
set(
Eaten.objects.filter(
food__in=Food.objects.filter(name="apple").values("eaten__meal")
)
),
set(),
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
{apple},
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])), {apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(node=node2)), [node1])
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name="pk")
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(
Related.objects.filter(custom__isnull=False), [notnull]
)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
e1 = Eaten.objects.create(food=apple, meal="lunch")
e2 = Eaten.objects.create(meal="lunch")
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=False),
[e1],
)
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=True),
[e2],
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
Tag.objects.create(name="t2", parent=t1, category=generic)
t3 = Tag.objects.create(name="t3", parent=t1)
Tag.objects.create(name="t4", parent=t3)
Tag.objects.create(name="t5", parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by("parent")), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(LoopX.objects.order_by("y__x__y__x__id"), [])
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping")
def test_null_ordering_added(self):
query = Tag.objects.values_list("parent_id", flat=True).order_by().query
query.group_by = ["parent_id"]
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
        # The number of values is picked to force three different IN batches
        # for Oracle, while staying below MSSQL's 2100-parameter limit.
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
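        # Only run the checks on backends that can pass len(numbers)
        # parameters in a single query; the largest num__in list below uses
        # all of them.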
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(
Number.objects.filter(num__in=numbers[:number]).count(), number
)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ["one", "two", "three"]
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [
("un", 1, objectas[0]),
("deux", 2, objectas[0]),
("trois", 3, objectas[2]),
]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
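        # ORing two filtered querysets must match filtering on the ORed Q
        # objects, regardless of operand order.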
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux", objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name="deux")
Q2 = Q(objectc__objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__name="ein")
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__objectb__name="trois")
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name="one", objectc__objecta__name="two")
Q2 = Q(
objecta__objectc__name="ein",
objectc__objecta__name="three",
objecta__objectb__name="trois",
)
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
Can create an instance of a model with only the PK field (#17056)."
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name="apples")
cls.f2 = Food.objects.create(name="oranges")
Eaten.objects.create(food=f1, meal="dinner")
cls.j1 = Job.objects.create(name="Manager")
cls.r1 = Responsibility.objects.create(description="Playing golf")
cls.j2 = Job.objects.create(name="Programmer")
cls.r2 = Responsibility.objects.create(description="Programming")
JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1)
JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2)
def test_to_field(self):
self.assertSequenceEqual(
Food.objects.exclude(eaten__meal="dinner"),
[self.f2],
)
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Playing golf"),
[self.j2],
)
self.assertSequenceEqual(
Responsibility.objects.exclude(jobs__name="Manager"),
[self.r2],
)
def test_ticket14511(self):
alex = Person.objects.get_or_create(name="Alex")[0]
jane = Person.objects.get_or_create(name="Jane")[0]
oracle = Company.objects.get_or_create(name="Oracle")[0]
google = Company.objects.get_or_create(name="Google")[0]
microsoft = Company.objects.get_or_create(name="Microsoft")[0]
intel = Company.objects.get_or_create(name="Intel")[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(
employee=employee, employer=employer, title=title
)
employ(oracle, alex, "Engineer")
employ(oracle, alex, "Developer")
employ(google, alex, "Engineer")
employ(google, alex, "Manager")
employ(microsoft, alex, "Manager")
employ(intel, alex, "Manager")
employ(microsoft, jane, "Developer")
employ(intel, jane, "Manager")
alex_tech_employers = (
alex.employers.filter(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = (
alex.employers.exclude(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
def test_exclude_reverse_fk_field_ref(self):
tag = Tag.objects.create()
Note.objects.create(tag=tag, note="note")
annotation = Annotation.objects.create(name="annotation", tag=tag)
self.assertEqual(
Annotation.objects.exclude(tag__note__note=F("name")).get(), annotation
)
def test_exclude_with_circular_fk_relation(self):
self.assertEqual(
ObjectB.objects.exclude(objecta__objectb__name=F("name")).count(), 0
)
def test_subquery_exclude_outerref(self):
qs = JobResponsibilities.objects.filter(
Exists(Responsibility.objects.exclude(jobs=OuterRef("job"))),
)
self.assertTrue(qs.exists())
self.r1.delete()
self.assertFalse(qs.exists())
def test_exclude_nullable_fields(self):
number = Number.objects.create(num=1, other_num=1)
Number.objects.create(num=2, other_num=2, another_num=2)
self.assertSequenceEqual(
Number.objects.exclude(other_num=F("another_num")),
[number],
)
self.assertSequenceEqual(
Number.objects.exclude(num=F("another_num")),
[number],
)
def test_exclude_multivalued_exists(self):
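        # Excluding across a multi-valued relation should compile to a
        # NOT EXISTS subquery instead of a join, as asserted on the captured
        # SQL below.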
with CaptureQueriesContext(connection) as captured_queries:
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Programming"),
[self.j1],
)
self.assertIn("exists", captured_queries[0]["sql"].lower())
def test_exclude_subquery(self):
subquery = JobResponsibilities.objects.filter(
responsibility__description="bar",
) | JobResponsibilities.objects.exclude(
job__responsibilities__description="foo",
)
self.assertCountEqual(
Job.objects.annotate(
responsibility=subquery.filter(job=OuterRef("name"),).values(
"id"
)[:1]
),
[self.j1, self.j2],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_exclude_unsaved_o2o_object(self):
jack = Staff.objects.create(name="jack")
jack_staff = StaffUser.objects.create(staff=jack)
unsaved_object = Staff(name="jane")
self.assertIsNone(unsaved_object.pk)
self.assertSequenceEqual(
StaffUser.objects.exclude(staff=unsaved_object), [jack_staff]
)
def test_exclude_unsaved_object(self):
        # These assertions will catch ValueError in Django 5.0, when passing
        # unsaved model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
company = Company.objects.create(name="Django")
msg = "Passing unsaved model instances to related filters is deprecated."
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.exclude(staff=Staff(name="unsaved"))
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1),
[self.o3],
)
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1).distinct(),
[self.o3],
)
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
[self.o3],
)
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)),
[self.o3],
)
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        orders not having any items at all. The correct way to write
this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
[self.o1],
)
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F("second__onetoonecategory")
).get(),
rel,
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name="i1")
NullableName.objects.create()
def test_null_in_exclude_qs(self):
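        # Rows with NULL names must survive the exclude: a plain SQL
        # NOT IN (...) drops them (NULL comparisons are unknown), so the ORM
        # has to add explicit IS NULL handling.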
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
["i1", none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i1"]),
[none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i3"]),
["i1", none_val],
attrgetter("name"),
)
inner_qs = NullableName.objects.filter(name="i1").values_list("name")
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val],
attrgetter("name"),
)
        # The inner queryset wasn't executed - it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]), ["i1"], attrgetter("name")
)
def test_double_exclude(self):
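        # ~~Q(x) must normalize back to Q(x), leaving no residual IS NOT NULL
        # clause from the double negation.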
self.assertEqual(
list(NullableName.objects.filter(~~Q(name="i1"))),
list(NullableName.objects.filter(Q(name="i1"))),
)
self.assertNotIn(
"IS NOT NULL", str(NullableName.objects.filter(~~Q(name="i1")).query)
)
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name="")
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_21001(self):
foo = NamedCategory.objects.create(name="foo")
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=""), [foo.pk], attrgetter("pk")
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
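        # Minimal stand-in node that always compiles to a fixed SQL fragment.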
def as_sql(self, compiler, connection):
return "dummy", []
class MockCompiler:
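        # Bare-bones compiler exposing just enough of the interface
        # (compile() and a quote-name-style callable) for WhereNode.as_sql().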
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ("(dummy AND dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy AND dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector=OR)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("(dummy OR dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy OR dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("dummy", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy)", []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = OR
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=AND)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
class QuerySetExceptionTests(SimpleTestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only("author")
msg = "'ManyToOneRel' object has no attribute 'attname'"
with self.assertRaisesMessage(AttributeError, msg):
list(qs)
def test_invalid_order_by(self):
msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name"
with self.assertRaisesMessage(FieldError, msg):
Article.objects.order_by("*")
def test_invalid_order_by_raw_column_alias(self):
msg = (
"Cannot resolve keyword 'queries_author.name' into field. Choices "
"are: cover, created, creator, creator_id, id, modified, name, "
"note, note_id, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Item.objects.values("creator__name").order_by("queries_author.name")
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name="foo")
d2 = ModelD.objects.create(name="bar")
cls.a1 = ModelA.objects.create(name="a1", d=cls.d1)
c = ModelC.objects.create(name="c")
b = ModelB.objects.create(name="b", c=c)
cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2)
def test_ticket_17886(self):
        # The first Q object generates the match; the remaining filters
        # should not remove the match even if they match nothing. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates a join to c, which the ORM tried to promote
        # but failed, as that join isn't nullable.
q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo")
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count("INNER JOIN"), 1)
def test_isnull_filter_promotion(self):
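        # A positive isnull=True filter needs a LEFT OUTER join so rows
        # without a related b survive; negations flip the promotion back and
        # forth, as checked below.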
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note="n", misc="m")
e = ExtraInfo.objects.create(info="info", note=n)
a = Author.objects.create(name="Author1", num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name="Foo", creator=a)
r2 = Report.objects.create(name="Bar")
Report.objects.create(name="Bar", creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count(" JOIN "), 2)
self.assertSequenceEqual(qs.order_by("name"), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
i3 = Identifier.objects.create(name="i3")
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(
Identifier.objects.filter(program=None, channel=None), [i3]
)
self.assertSequenceEqual(
Identifier.objects.exclude(program=None, channel=None).order_by("name"),
[i1, i2],
)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
        # Check that ~~Q() (or, equivalently, .exclude(~Q())) works like Q()
        # for join promotion.
qs1_doubleneg = Identifier.objects.exclude(
~Q(program__id=p1.id, channel__id=c1.id)
).order_by("pk")
qs1_filter = Identifier.objects.filter(
program__id=p1.id, channel__id=c1.id
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Test OR + double negation. The expected result is that channel is
        # LEFT OUTER joined and program INNER joined.
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by("pk")
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed to
        # the lowest level of the boolean tree, and another query where this
        # isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by("pk")
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN"))
self.assertEqual(0, str(qs1.query).count("INNER JOIN"))
self.assertEqual(
str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN")
)
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn("INNER JOIN", str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name="foo").select_related("tag")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name="foo").filter(member__name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name="r4").filter(report__name="r1")
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable
        joins is updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
springfield_elementary = School.objects.create()
hogward = School.objects.create()
Student.objects.create(school=springfield_elementary)
hp = Student.objects.create(school=hogward)
Classroom.objects.create(school=hogward, name="Potion")
Classroom.objects.create(school=springfield_elementary, name="Main")
qs = Student.objects.filter(
~(
Q(school__classroom__name="Main")
& Q(school__classroom__has_blackboard=None)
)
)
self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1="f1", f2="f2")
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(" JOIN "), 0)
qs = qs.select_related("a", "b")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 0)
self.assertEqual(str(qs.query).count(" LEFT OUTER JOIN "), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # Now we have two different joins in an ORed condition; these
        # must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN
# even inside the ORed case. If the join to a__ returns nothing,
# the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f2="foo")).filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = qs.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") | (Q(b__f1="foo") & Q(a__f1="bar")))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(
(Q(a__f1="foo") | Q(b__f1="foo")) & (Q(a__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
qs = BaseA.objects.filter(
Q(a__f1="foo") | Q(a__f1="bar") & (Q(b__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F("b__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(
Q(a__f1=F("b__f1")) | Q(a__f2=F("b__f2")) | Q(c__f1="foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
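# Editorial summary of the promotion rules exercised above (background, not
# asserted here): a join referenced from only one arm of an OR must be LEFT
# OUTER, because the other arm can still match rows for which that join
# produces nothing; an ANDed filter on the same join demotes it back to
# INNER, since that filter can only be satisfied when the joined row exists.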
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
i_extra = Identifier.objects.create(name="extra")
i_program = Identifier.objects.create(name="program")
program = Program.objects.create(identifier=i_program)
i_channel = Identifier.objects.create(name="channel")
channel = Channel.objects.create(identifier=i_channel)
channel.programs.add(program)
        # channel contains the program's identifier, so all Identifiers
        # except that one should be returned.
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=channel).order_by("name"),
[i_channel, i_extra],
)
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=None).order_by("name"),
[i_program],
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text="pg3")
pg2 = Page.objects.create(text="pg2")
pg1 = Page.objects.create(text="pg1")
pa1 = Paragraph.objects.create(text="pa1")
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text="pa2")
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text="pa3")
ch1 = Chapter.objects.create(title="ch1", paragraph=pa1)
ch2 = Chapter.objects.create(title="ch2", paragraph=pa2)
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3)
b1 = Book.objects.create(title="b1", chapter=ch1)
b2 = Book.objects.create(title="b2", chapter=ch2)
b3 = Book.objects.create(title="b3", chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text="pg1")
self.assertNotIn("IS NOT NULL", str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data="foo")
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data="bar", parent=my1)
parents = MyObject.objects.filter(parent=F("id"))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F("id"))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
        Tests ORed combining of querysets when one side contains an exclude()
        subquery.
"""
t = Tag.objects.create(name="foo")
a1 = Annotation.objects.create(tag=t, name="a1")
a2 = Annotation.objects.create(tag=t, name="a2")
a3 = Annotation.objects.create(tag=t, name="a3")
n = Note.objects.create(note="foo", misc="bar")
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name="")
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn("LEFT OUTER JOIN", str(qs.query))
else:
self.assertNotIn("LEFT OUTER JOIN", str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(
Order.objects.filter(items__in=OrderItem.objects.values_list("status")),
[o1],
)
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data="foo")
lfa2 = LeafA.objects.create(data="bar")
lfb1 = LeafB.objects.create(data="lfb1")
lfb2 = LeafB.objects.create(data="lfb2")
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
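# Editorial sketch of the statement shape expected above (an assumption
# about the compiler output; identifiers are illustrative):
#     SELECT ... FROM leafb WHERE id IN (
#         SELECT b_id FROM join_table WHERE a_id IN (
#             SELECT id FROM leafa WHERE data = 'foo'))
# i.e. both values_list() querysets are pushed down as nested IN subqueries
# rather than being evaluated as separate database queries.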
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = (
Item.objects.exclude(note__isnull=False)
.filter(name="something", creator__extra__isnull=True)
.order_by()
)
self.assertEqual(1, str(qs.query).count("INNER JOIN"))
self.assertEqual(0, str(qs.query).count("OUTER JOIN"))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
cls.c = ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
# Passing an object of the class on which query is done.
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)
):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = [self.oa]
out_b = [self.ob, self.pob1]
out_c = [self.c]
# proxy model objects
self.assertSequenceEqual(
ObjectB.objects.filter(objecta=self.poa).order_by("name"), out_b
)
self.assertSequenceEqual(
ObjectA.objects.filter(objectb__in=self.pob).order_by("pk"), out_a * 2
)
# child objects
self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by("name"),
out_b,
)
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by(
"name"
),
out_b,
)
# parent objects
self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
ValueQuerySets are not checked for compatibility with the lookup field.
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name="ob")
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name="pob")
pob.num = pob.objecta.pk
pob.save()
self.assertSequenceEqual(
ObjectB.objects.filter(
objecta__in=ObjectB.objects.values_list("num")
).order_by("pk"),
[ob, pob],
)
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data="s1")
s2 = SharedConnection.objects.create(data="s2")
s3 = SharedConnection.objects.create(data="s3")
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3]
)
self.assertSequenceEqual(
SharedConnection.objects.order_by("-pointera__connection", "pk"),
expected_ordering,
)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name="jackstaff")
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name="jillstaff")
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
"creator__staffuser__staff", "owner__staffuser__staff"
)
self.assertEqual(str(qs.query).count(" JOIN "), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(
task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff,
)
self.assertEqual(
task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff,
)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related("parent").defer("parent__created")
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(" INNER JOIN ", str(qs.query))
qs = qs.values("parent__parent__id")
self.assertIn(" INNER JOIN ", str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values("parent__parent__id")
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values("objecta__name")
self.assertIn(" INNER JOIN ", str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1")
sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2")
sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3")
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(
SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"),
[sc2, sc3],
)
self.assertSequenceEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]
)
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id="cpt1", tag="cpt1")
cp1 = CustomPk.objects.create(name="cp1", extra="extra")
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F("school")))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1
)
complex_q = Q(
pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000
/ F("ticket23605b__modelc_fk__field_c0")
)
&
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True)
& ~Q(
ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
                            # The same filters as the commented ones above,
                            # but double-negated (once by the ~Q() above and
                            # once by the ~ on the parentheses). So, again,
                            # a1 matches and a2 doesn't.
Q(field_b1=True)
& Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
)
)
).filter(ticket23605b__field_b1=True)
)
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
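# Editorial note (background from the ticket, not asserted directly): when a
# queryset is embedded as a subquery, its table aliases must be relabeled so
# they don't clash with the outer query's aliases; the original bug was that
# aliases inside the innermost nested subqueries were not relabeled
# correctly.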
class TestTicket24279(TestCase):
    def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "Field 'id' expected a number but got 'abc'."
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag="abc")
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, "abc"])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(
Individual.objects.filter(
Q(alive=False), Q(related_individual__isnull=True)
),
[i4],
)
self.assertSequenceEqual(
Individual.objects.exclude(
Q(alive=False), Q(related_individual__isnull=True)
).order_by("pk"),
[i1, i2, i3],
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_23622(self):
"""
Make sure __pk__in and __in work the same for related fields when
using a distinct on subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = Q(
ticket23605b__pk__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
qy = Q(
ticket23605b__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)),
set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)),
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.core.exceptions import FieldError
from django.core.management.color import no_style
from django.db import (
DatabaseError,
DataError,
IntegrityError,
OperationalError,
connection,
)
from django.db.models import (
CASCADE,
PROTECT,
AutoField,
BigAutoField,
BigIntegerField,
BinaryField,
BooleanField,
CharField,
CheckConstraint,
DateField,
DateTimeField,
DecimalField,
DurationField,
F,
FloatField,
ForeignKey,
ForeignObject,
Index,
IntegerField,
JSONField,
ManyToManyField,
Model,
OneToOneField,
OrderBy,
PositiveIntegerField,
Q,
SlugField,
SmallAutoField,
SmallIntegerField,
TextField,
TimeField,
UniqueConstraint,
UUIDField,
Value,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper
from django.db.models.indexes import IndexExpression
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase,
ignore_warnings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.utils.deprecation import RemovedInDjango51Warning
from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField
from .models import (
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
AuthorWithIndexedName,
AuthorWithUniqueName,
AuthorWithUniqueNameAndBirthday,
Book,
BookForeignObj,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithoutAuthor,
BookWithSlug,
IntegerPK,
Node,
Note,
NoteRename,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
        # local_models should contain test-dependent model classes that will
        # be automatically removed from the app cache on test teardown.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
if cast_function and type(database_default) != type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
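        # Minimal usage sketch (hypothetical values; fk_to names the
        # referenced (table, column) pair):
        #     self.get_constraints_count(
        #         Book._meta.db_table, "author_id", ("schema_author", "id")
        #     )
        #     -> {"fks": 1, "uniques": 0, "indexes": 0}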
def get_column_collation(self, table, column):
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
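    # Editorial note (an assumption about the backend SQL, not asserted
    # verbatim): can_create_inline_fk means the REFERENCES clause can be
    # declared in the ADD COLUMN statement itself, roughly
    #     ALTER TABLE schema_note ADD COLUMN book_id integer
    #         REFERENCES schema_book (id)
    # instead of a separate deferred ALTER TABLE ... ADD CONSTRAINT.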
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
        # Add an inline foreign key, update data, and add an index, all in
        # the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
@isolate_apps("schema")
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = "schema"
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name("id")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(
new_field.remote_field.through, "tag_id", "schema_tag"
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(
connection
) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
"column": editor.quote_name(new_field.name),
}
self.assertFalse(
any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
)
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
)
columns = self.column_classes(Author)
self.assertEqual(
columns["age"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
        Adding a field and removing it removes all deferred SQL referring to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["surname"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
columns["surname"][1][6],
connection.features.interprets_empty_strings_as_nulls,
)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns["awesome"][0]
self.assertEqual(
field_type, connection.features.introspected_field_types["BooleanField"]
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default that the field transforms before
        # saving (here the dict default is stored as its length).
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns["thing"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # MySQL uses the same underlying column type for both, so the column
        # may come back as either of these two types.
self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b"123")
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns["bits"][0], "TextField")
def test_remove_field(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with CaptureQueriesContext(connection) as ctx:
editor.remove_field(Author, Author._meta.get_field("name"))
columns = self.column_classes(Author)
self.assertNotIn("name", columns)
if getattr(connection.features, "can_alter_table_drop_column", True):
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertTrue(columns["name"][1][6])
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Now that ID is an IntegerField, the database raises an error if it
# isn't provided.
if not connection.features.supports_unspecified_pk:
with self.assertRaises(DatabaseError):
Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_auto_field_quoted_db_column(self):
class Foo(Model):
id = AutoField(primary_key=True, db_column='"quoted_id"')
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.db_column = '"quoted_id"'
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field("uuid")
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name("uuid")
new_field.model = Author
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field("id"))
editor.alter_field(Author, old_field, new_field, strict=True)
# Redundant unique constraint is not added.
count = self.get_constraints_count(
Author._meta.db_table,
Author._meta.get_field("uuid").column,
None,
)
self.assertLessEqual(count["uniques"], 1)
@isolate_apps("schema")
def test_alter_primary_key_quoted_db_table(self):
class Foo(Model):
class Meta:
app_label = "schema"
db_table = '"foo"'
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_not_null_with_default_value(self):
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("address")
new_field = TextField(blank=True, default="", null=False)
new_field.set_attributes_from_name("address")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("parent")
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name("parent")
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_null_with_default_value_deferred_constraints(self):
class Publisher(Model):
class Meta:
app_label = "schema"
class Article(Model):
publisher = ForeignKey(Publisher, CASCADE)
title = CharField(max_length=50, null=True)
description = CharField(max_length=100, null=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Publisher)
editor.create_model(Article)
self.isolated_local_models = [Article, Publisher]
publisher = Publisher.objects.create()
Article.objects.create(publisher=publisher)
old_title = Article._meta.get_field("title")
new_title = CharField(max_length=50, null=False, default="")
new_title.set_attributes_from_name("title")
old_description = Article._meta.get_field("description")
new_description = CharField(max_length=100, null=False, default="")
new_description.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.alter_field(Article, old_title, new_title, strict=True)
editor.alter_field(Article, old_description, new_description, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05")
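        # The pre-existing row holds a parseable date so backends that cast
        # existing data during ALTER (e.g. PostgreSQL's USING clause) succeed.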
old_field = Note._meta.get_field("info")
new_field = DateField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05 3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = TimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_not_null_to_null(self):
"""
Nullability for textual fields is preserved on databases that
interpret empty strings as NULLs.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
columns = self.column_classes(Author)
# Field is nullable.
self.assertTrue(columns["uuid"][1][6])
# Change to NOT NULL.
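        # On such backends (e.g. Oracle) empty strings are stored as NULL,
        # so Django keeps textual columns nullable even with null=False.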
old_field = Author._meta.get_field("uuid")
new_field = SlugField(null=False, blank=True)
new_field.set_attributes_from_name("uuid")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
# Nullability is preserved.
self.assertTrue(columns["uuid"][1][6])
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="aaa")
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="bbb")
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns["height"][1][6])
# Create some test data
Author.objects.create(name="Not null author", height=12)
Author.objects.create(name="Null author")
# Verify null value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertIsNone(Author.objects.get(name="Null author").height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertFalse(columns["height"][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertEqual(Author.objects.get(name="Null author").height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field("name")
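        # copy() keeps the field's name/column attributes, so the only
        # difference between old_field and new_field is the null flag.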
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_char_field_decrease_length(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
Author.objects.create(name="x" * 255)
# Change max_length of CharField.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
msg = "value too long for type character varying(254)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_with_custom_db_type(self):
from django.contrib.postgres.fields import ArrayField
class Foo(Model):
field = ArrayField(CharField(max_length=255))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("field")
new_field = ArrayField(CharField(max_length=16))
new_field.set_attributes_from_name("field")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=["x" * 16])
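        # Existing data is 16 characters wide, so shrinking the base field
        # to 15 must raise a DataError rather than silently truncate.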
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(CharField(max_length=15))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_nested_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(ArrayField(CharField(max_length=16)))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=[["x" * 16]])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(ArrayField(CharField(max_length=15)))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
from django.contrib.postgres.fields import ArrayField
ci_collation = "case_insensitive"
cs_collation = "en-x-icu"
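        # case_insensitive is created below as a non-deterministic ICU
        # collation; en-x-icu is a deterministic collation provided by ICU.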
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16, db_collation=ci_collation))
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
ci_collation,
)
# Alter collation.
old_field = ArrayModel._meta.get_field("field")
new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
new_field_cs.set_attributes_from_name("field")
        new_field_cs.model = ArrayModel
with connection.schema_editor() as editor:
editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
cs_collation,
)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field("info")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns["height"][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details["foreign_key"]:
self.fail(
"Found an unexpected FK constraint to %s" % details["columns"]
)
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique anymore
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
Book.objects.all().delete()
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique now
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
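        # Backends that can't introspect FKs report 0 fks; backends that
        # don't automatically index FK columns report 0 indexes.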
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for
# OneToOneField.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for
# ForeignKey.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name="Foo")
Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
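        # The explicit pk bypassed the sequence; on backends that use
        # sequences, reset it so the next implicit insert doesn't collide.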
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_autofield_pk_to_smallautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = SmallAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToAutoField(Model):
i = AutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToBigAutoField(Model):
i = BigAutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToBigAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
"""
        Should be able to alter a SmallIntegerField(primary_key=True) to
        SmallAutoField(primary_key=True).
"""
class SmallIntegerPK(Model):
i = SmallIntegerField(primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SmallIntegerPK)
self.isolated_local_models = [SmallIntegerPK]
old_field = SmallIntegerPK._meta.get_field("i")
new_field = SmallAutoField(primary_key=True)
new_field.model = SmallIntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field("i")
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# model requires a new PK
old_field = Transitional._meta.get_field("j")
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name("j")
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
        Tests simple renaming of fields.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(
columns["display_name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field="name")
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["detail_info"][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass(
"TagM2MTest", related_name="books", through=LocalTagThrough
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(
columns["book_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertEqual(
columns["tag_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
class LocalAuthorNoteThrough(Model):
book = ForeignKey("schema.Author", CASCADE)
tag = ForeignKey("self", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalNoteWithM2MThrough(Model):
authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalAuthorNoteThrough)
editor.create_model(LocalNoteWithM2MThrough)
# Remove the through parameter.
old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
new_field = ManyToManyField("Author")
new_field.set_attributes_from_name("authors")
msg = (
f"Cannot alter field {old_field} into {new_field} - they are not "
f"compatible types (you cannot alter to or from M2M fields, or add or "
f"remove through= on M2M fields)"
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
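        # contribute_to_class() creates the auto-generated through model;
        # add_field() below then creates the corresponding join table.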
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models["schema"][
new_field.remote_field.through._meta.model_name
]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(
LocalAuthorWithM2MThrough, old_field, new_field, strict=True
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
"tagm2mtest_id",
"schema_tagm2mtest",
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
)
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = "schema"
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = "schema"
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field("title")
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, "title1")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details["columns"] == ["height"] and details["check"]:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
class DurationModel(Model):
duration = DurationField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(DurationModel)
self.isolated_local_models = [DurationModel]
constraint_name = "duration_gte_5_minutes"
constraint = CheckConstraint(
check=Q(duration__gt=datetime.timedelta(minutes=5)),
name=constraint_name,
)
DurationModel._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(DurationModel, constraint)
constraints = self.get_constraints(DurationModel._meta.db_table)
self.assertIn(constraint_name, constraints)
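        # Wrap the failing insert in atomic() so the aborted transaction is
        # rolled back and the connection remains usable for the next insert.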
with self.assertRaises(IntegrityError), atomic():
DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the custom check constraint
constraint = CheckConstraint(
check=Q(height__gte=0), name="author_height_gte_0_check"
)
custom_constraint_name = constraint.name
Author._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field check
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field check
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the check constraint
with connection.schema_editor() as editor:
Author._meta.constraints = []
editor.remove_constraint(Author, constraint)
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
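        # The hyphen in "unique-table" forces every generated statement to
        # quote the table name; an unquoted reference would be invalid SQL.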
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
TagUniqueRename._meta.db_table = "unique-table"
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
finally:
with connection.schema_editor() as editor:
editor.delete_model(TagUniqueRename)
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
"""
        If AlterField isn't selective about dropping foreign key constraints
        when modifying a field with a unique constraint, it incorrectly drops
        and recreates the Book.author foreign key even though that foreign
        key doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name("name")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
class Book(Model):
tags = ManyToManyField(Tag, related_name="books")
class Meta:
app_label = "schema"
self.isolated_local_models = [Book._meta.get_field("tags").remote_field.through]
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name("slug")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueName)
self.local_models = [AuthorWithUniqueName]
# Add the custom unique constraint
constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
custom_constraint_name = constraint.name
AuthorWithUniqueName._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueName, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field uniqueness
old_field = AuthorWithUniqueName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field uniqueness
new_field2 = AuthorWithUniqueName._meta.get_field("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueName._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueName, constraint)
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, UniqueTest._meta.unique_together, []
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, [], UniqueTest._meta.unique_together
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no unique_together constraint to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
editor.add_field(BookWithoutAuthor, new_field)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueNameAndBirthday)
self.local_models = [AuthorWithUniqueNameAndBirthday]
# Add the custom unique constraint
constraint = UniqueConstraint(
fields=["name", "birthday"], name="author_name_birthday_uniq"
)
custom_constraint_name = constraint.name
AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Remove unique together
unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, unique_together, []
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add unique together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, [], unique_together
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueNameAndBirthday._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(fields=["name"], name="name_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
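        # create_sql() returns a Statement whose references_table()/
        # references_column() hooks allow inspecting the DDL without parsing
        # the raw SQL.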
self.assertIs(sql.references_table(table), True)
self.assertIs(sql.references_column(table, "name"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC"])
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Upper("title"),
Lower("slug"),
name="func_upper_lower_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains database functions.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
sql = str(sql)
self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_unique_constraint_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("height").desc(),
"uuid",
Lower("name").asc(),
name="func_f_lower_field_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
constraints = self.get_constraints(table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "uuid"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
def test_func_unique_constraint_partial(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_cond_weight_uq",
condition=Q(weight__isnull=False),
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("weight"),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
def test_func_unique_constraint_covering(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_covering_uq",
include=["weight", "height"],
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
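        # Introspection reports the expression member as None, followed by the
        # covered (INCLUDE) columns.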
self.assertEqual(
constraints[constraint.name]["columns"],
[None, "weight", "height"],
)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
self.assertIs(sql.references_column(table, "height"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"INCLUDE (%s, %s)"
% (
editor.quote_name("weight"),
editor.quote_name("height"),
),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
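            # Registering Lower/Abs as lookups lets F("name__lower") and
            # F("weight__abs") resolve to the corresponding expressions.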
constraint = UniqueConstraint(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [["author", "title"]], [])
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
class TagIndexed(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
index_together = [["slug", "title"]]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
self.isolated_local_models = [TagIndexed]
# Ensure there is an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c["columns"] == ["slug", "title"]
),
True,
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_remove_index_together_does_not_remove_meta_indexes(self):
class AuthorWithIndexedNameAndBirthday(Model):
name = CharField(max_length=255)
birthday = DateField()
class Meta:
app_label = "schema"
index_together = [["name", "birthday"]]
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedNameAndBirthday)
self.isolated_local_models = [AuthorWithIndexedNameAndBirthday]
# Add the custom index
index = Index(fields=["name", "birthday"], name="author_name_birthday_idx")
custom_index_name = index.name
AuthorWithIndexedNameAndBirthday._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedNameAndBirthday, index)
# Ensure the indexes exist
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Remove index together
index_together = AuthorWithIndexedNameAndBirthday._meta.index_together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, index_together, []
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add index together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, [], index_together
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the index
with connection.schema_editor() as editor:
AuthorWithIndexedNameAndBirthday._meta.indexes = []
editor.remove_index(AuthorWithIndexedNameAndBirthday, index)
@isolate_apps("schema")
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
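        # Some backends (e.g. older SQLite) can't rename a table referenced by
        # foreign keys inside an atomic block, so atomic mirrors the feature
        # flag.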
# Alter the table
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
        # Ensure the table is there and the name column has no index
        self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
        # Add the index
        index = Index(fields=["name"], name="author_name_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
self.local_models = [AuthorWithIndexedName]
# Ensure the table has its index
self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=["-name"], name="author_name_idx")
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=("name",),
)
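        # _create_index_name() reproduces the editor's auto-generated name for
        # the db_index=True index so the test can assert on it directly.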
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithIndexedName, old_field, new_field, strict=True
)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Indexes with column ordering (ASC/DESC) are created correctly.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
        # The name column doesn't have an index yet
        self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
        # Add a unique column; BookWithSlug shares Book's db_table, so the
        # column lands on the same table and gets an implicit unique
        # constraint.
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
        # Remove the unique flag and check the constraint goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
def _index_expressions_wrappers(self):
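        # Return the qualified names of the wrapper expressions (OrderBy and,
        # on most backends, Collate) allowed at the top of an indexed
        # expression, for use in error-message assertions.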
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name").desc(), name="func_lower_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_f(self):
with connection.schema_editor() as editor:
editor.create_model(Tag)
index = Index("slug", F("title").desc(), name="func_f_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Tag, index)
sql = index.create_sql(Tag, editor)
table = Tag._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
# SQL contains columns.
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIs(sql.references_column(table, "title"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Tag, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
index = Index(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains database functions.
self.assertIs(sql.references_column(table, "name"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
index = Index(
F("author").desc(),
Lower("title").asc(),
"pub_date",
name="func_f_lower_field_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Book, index)
sql = index.create_sql(Book, editor)
table = Book._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
self.assertEqual(len(constraints[index.name]["columns"]), 3)
self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "author_id"), True)
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "pub_date"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@isolate_apps("schema")
def test_func_index_f_decimalfield(self):
class Node(Model):
value = DecimalField(max_digits=5, decimal_places=2)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
index = Index(F("value"), name="func_f_decimalfield_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Node, index)
sql = index.create_sql(Node, editor)
table = Node._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "value"), True)
# SQL doesn't contain casting.
self.assertNotIn("CAST", str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Node, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_cast(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Cast("weight", FloatField()), name="func_cast_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
        # BookWithSlug shares Book's db_table ("schema_book").
        table = Book._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@skipIfDBFeature("collate_as_index_expression")
def test_func_index_collate_f_ordered(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(
Collate(F("name").desc(), collation=collation),
name="func_collate_f_desc_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_calc(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns and expressions.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "weight"), True)
sql = str(sql)
self.assertIs(
sql.index(editor.quote_name("height"))
< sql.index("/")
< sql.index(editor.quote_name("weight"))
< sql.index("+")
< sql.index("5"),
True,
)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index("field__some_key", name="func_json_key_idx")
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform_cast(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index(
Cast(KeyTextTransform("some_key", "field"), IntegerField()),
name="func_json_key_cast_idx",
)
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_index_unsupported(self):
# Index is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("name"), name="random_idx")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_index(Author, index))
self.assertIsNone(editor.remove_index(Author, index))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nonexistent_field(self):
index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Random(), name="func_random_idx")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_index(Author, index)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
"id",
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
def test_alter_primary_key_the_same_name(self):
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=2, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
        Ensures the transaction is correctly closed when an error occurs
        inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@skipIfDBFeature("can_rollback_ddl")
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(
editor.sql_create_table % {"table": "foo", "definition": ""}
)
@skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name(
"author_foreign_key_with_really_long_field_name_id"
)
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
        # Add a second FK; before the fix this failed due to the overly long
        # reference name
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
self.isolated_local_models = [Author]
if connection.vendor == "mysql":
self.assertForeignKeyExists(
Book, "author_id", '"table_author_double_quoted"'
)
else:
self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
        Tries creating a model's table, and then deleting it, when the table
        name is an SQL reserved word.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail(
"Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e
)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs["db_column"] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
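        # identifier_converter() normalizes a name to the form introspection
        # reports (Oracle, for instance, folds identifier case).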
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_index
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
"include": "",
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
constraint_name = "CamelCaseUniqConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(editor._create_unique_sql(model, [field], constraint_name))
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(
model,
get_field(Author, CASCADE, field_class=ForeignKey),
field,
strict=True,
)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(
item[0],
None if connection.features.interprets_empty_strings_as_nulls else "",
)
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default="surname default")
new_field.set_attributes_from_name("surname")
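        # The default is only used to backfill existing rows; the
        # database-level DEFAULT clause is dropped again afterwards.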
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], "surname default")
# And that the default is no longer set in the database.
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_add_field_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable CharField with a default.
new_field = CharField(max_length=15, blank=True, null=True, default="surname")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "surname"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_add_textfield_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable TextField with a default.
new_field = TextField(blank=True, null=True, default="text")
new_field.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT description FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "description"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field("height")
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_doesnt_perform_queries(self):
"""
        No queries are performed if a field default changes and the field
        isn't changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_fk_attributes_noop(self):
"""
No queries are performed when changing field attributes that don't
affect the schema.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_field = Book._meta.get_field("author")
new_field = ForeignKey(
Author,
blank=True,
editable=False,
error_messages={"invalid": "error message"},
help_text="help text",
limit_choices_to={"limit": "choice"},
on_delete=PROTECT,
related_name="related_name",
related_query_name="related_query_name",
validators=[lambda x: x],
verbose_name="verbose name",
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, new_field, old_field, strict=True)
def test_alter_field_choices_noop(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(
choices=(("Jane", "Jane"), ("Joe", "Joe")),
max_length=255,
)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Two indexes are expected: the regular index and a
        # varchar_pattern_ops index that supports LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851",
"schema_author_nom_de_plume_7570a851_like",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Two indexes are expected: the unique constraint's index and a
        # varchar_pattern_ops index that supports LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851_like",
"schema_author_nom_de_plume_key",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field("info")
new_field = TextField(db_index=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, "info"),
["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"), []
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to add db_index=True
old_field = Tag._meta.get_field("slug")
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
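        # Both alters above leave the constraint list untouched: a unique
        # constraint is already backed by an index, so toggling db_index=True
        # on an already-unique SlugField is a schema-level no-op.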
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
self, mocked_datetime, mocked_tz
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
        now = datetime.datetime(year=2000, month=1, day=1, hour=1, minute=1)
        now_tz = datetime.datetime(
            year=2000, month=1, day=1, hour=1, minute=1, tzinfo=datetime.timezone.utc
        )
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
        dtob_auto_now_add = DateTimeField(auto_now_add=True)
        dtob_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
        self.check_added_field_default(
            editor,
            Author,
            dtob_auto_now_add,
            "dtob_auto_now_add",
            now,
        )
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
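        # Rough sketch of the branch exercised here (simplified from
        # BaseDatabaseSchemaEditor._effective_default): when a field sets
        # auto_now or auto_now_add and has no explicit default, the schema
        # editor synthesizes one from the current time, roughly
        #     default = timezone.now()  # DateTimeField
        #     default = datetime.now().date()  # DateField
        #     default = datetime.now().time()  # TimeField
        # which is why datetime and timezone are mocked above.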
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
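        # What this asserts, roughly: _create_index_name() splits off a
        # quoted namespace before hashing, along the lines of
        #     namespace, table_name = split_identifier('"ns"."tbl"')
        # (split_identifier lives in django.db.backends.utils), so both
        # spellings hash to the same index name.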
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS,
FieldError,
ImproperlyConfigured,
ValidationError,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass,
construct_instance,
fields_for_model,
model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import (
Article,
ArticleStatus,
Author,
Author1,
Award,
BetterWriter,
BigInt,
Book,
Category,
Character,
Colour,
ColourfulItem,
CustomErrorMessage,
CustomFF,
CustomFieldForExclusionModel,
DateTimePost,
DerivedBook,
DerivedPost,
Dice,
Document,
ExplicitPK,
FilePathModel,
FlexibleDatePost,
Homepage,
ImprovedArticle,
ImprovedArticleWithParentLink,
Inventory,
NullableUniqueCharFieldModel,
Number,
Person,
Photo,
Post,
Price,
Product,
Publication,
PublicationDefaults,
StrictAssignmentAll,
StrictAssignmentFieldSpecific,
Student,
StumpJoke,
TextFile,
Triple,
Writer,
WriterProfile,
test_images,
)
if test_images:
from .models import ImageFile, NoExtensionImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
fields = "__all__"
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
fields = "__all__"
class NoExtensionImageFileForm(forms.ModelForm):
class Meta:
model = NoExtensionImageFile
fields = "__all__"
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = "__all__"
class PriceForm(forms.ModelForm):
class Meta:
model = Price
fields = "__all__"
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = "__all__"
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = "__all__"
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = (
"key",
"desc",
)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = "__all__"
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
fields = "__all__"
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
fields = "__all__"
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = "__all__"
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
fields = "__all__"
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = "__all__"
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
fields = "__all__"
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(
Inventory.objects.all(), to_field_name="barcode"
)
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ["name", "markup"]
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
fields = "__all__"
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
fields = "__all__"
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ("/some/form/javascript",)
css = {"all": ("/some/form/css",)}
class Meta:
model = TextFile
fields = "__all__"
class CustomErrorMessageForm(forms.ModelForm):
name1 = forms.CharField(error_messages={"invalid": "Form custom error message."})
class Meta:
fields = "__all__"
model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields), ["name", "slug", "url"])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
with self.assertRaisesMessage(
ValueError, "ModelForm has no model class specified."
):
NoModelModelForm()
def test_empty_fields_to_fields_for_model(self):
"""
        An argument of fields=() to fields_for_model should return an empty
        dictionary.
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance
receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({"name": "John Doe"})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, "")
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["character"].required = False
char = Character.objects.create(
username="user", last_action=datetime.datetime.today()
)
data = {"study": "Engineering"}
data2 = {"study": "Engineering", "character": char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_blank_false_with_null_true_foreign_key_field(self):
"""
A ModelForm with a model having ForeignKey(blank=False, null=True)
and the form field set to required=False should allow the field to be
unset.
"""
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["character"].required = False
character = Character.objects.create(
username="user", last_action=datetime.datetime.today()
)
award = Award.objects.create(name="Best sprinter", character=character)
data = {"name": "Best tester", "character": ""} # remove character
form = AwardForm(data=data, instance=award)
self.assertTrue(form.is_valid())
award = form.save()
self.assertIsNone(award.character)
def test_blank_foreign_key_with_radio(self):
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ["author"]
widgets = {"author": forms.RadioSelect()}
writer = Writer.objects.create(name="Joe Doe")
form = BookForm()
self.assertEqual(
list(form.fields["author"].choices),
[
("", "---------"),
(writer.pk, "Joe Doe"),
],
)
def test_non_blank_foreign_key_with_radio(self):
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = ["character"]
widgets = {"character": forms.RadioSelect()}
character = Character.objects.create(
username="user",
last_action=datetime.datetime.today(),
)
form = AwardForm()
self.assertEqual(
list(form.fields["character"].choices),
[(character.pk, "user")],
)
def test_save_blank_false_with_required_false(self):
"""
A ModelForm with a model with a field set to blank=False and the form
field set to required=False should allow the field to be unset.
"""
obj = Writer.objects.create(name="test")
form = CustomWriterForm(data={"name": ""}, instance=obj)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.name, "")
def test_save_blank_null_unique_charfield_saves_null(self):
form_class = modelform_factory(
model=NullableUniqueCharFieldModel, fields="__all__"
)
empty_value = (
"" if connection.features.interprets_empty_strings_as_nulls else None
)
data = {
"codename": "",
"email": "",
"slug": "",
"url": "",
}
form = form_class(data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
self.assertEqual(form.instance.email, empty_value)
self.assertEqual(form.instance.slug, empty_value)
self.assertEqual(form.instance.url, empty_value)
# Save a second form to verify there isn't a unique constraint violation.
form = form_class(data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
self.assertEqual(form.instance.email, empty_value)
self.assertEqual(form.instance.slug, empty_value)
self.assertEqual(form.instance.url, empty_value)
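        # Hedged summary of why this works: the model-side
        # CharField.formfield() passes empty_value=None to the form field
        # when null=True and the backend doesn't interpret "" as NULL, so
        # the cleaned "" becomes None and repeated saves don't collide on
        # the unique constraints.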
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(
list(ExtraFields.base_fields), ["name", "slug", "url", "some_extra_field"]
)
def test_extra_field_model_form(self):
with self.assertRaisesMessage(FieldError, "no-field"):
class ExtraPersonForm(forms.ModelForm):
"""ModelForm with an extra field"""
age = forms.IntegerField()
class Meta:
model = Person
fields = ("name", "no-field")
def test_extra_declared_field_model_form(self):
class ExtraPersonForm(forms.ModelForm):
"""ModelForm with an extra field"""
age = forms.IntegerField()
class Meta:
model = Person
fields = ("name", "age")
def test_extra_field_modelform_factory(self):
with self.assertRaisesMessage(
FieldError, "Unknown field(s) (no-field) specified for Person"
):
modelform_factory(Person, fields=["no-field", "name"])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = "__all__"
self.assertIsInstance(
ReplaceField.base_fields["url"], forms.fields.BooleanField
)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ["url"]
self.assertIsInstance(
ReplaceField.base_fields["url"], forms.fields.BooleanField
)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(
ReplaceField.base_fields["url"], forms.fields.BooleanField
)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = "__all__"
wf = WriterForm({"name": "Richard Lockridge"})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = "Unknown field(s) (nonexistent) specified for Category"
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ["nonexistent"]
def test_limit_fields_with_string(self):
msg = (
"CategoryForm.Meta.fields cannot be a string. Did you mean to type: "
"('url',)?"
)
with self.assertRaisesMessage(TypeError, msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = "url" # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ["url"]
self.assertEqual(list(ExcludeFields.base_fields), ["name", "slug"])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ["nonexistent"]
self.assertEqual(list(ExcludeFields.base_fields), ["name", "slug", "url"])
def test_exclude_fields_with_string(self):
msg = (
"CategoryForm.Meta.exclude cannot be a string. Did you mean to type: "
"('url',)?"
)
with self.assertRaisesMessage(TypeError, msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = "url" # note the missing comma
def test_exclude_and_validation(self):
        # The Price instance generated by this form is not valid because the
        # quantity field is required, but the form itself is valid because
        # that field is excluded from it. This is for backwards
        # compatibility.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ("quantity",)
form = PriceFormWithoutQuantity({"price": "6.00"})
self.assertTrue(form.is_valid())
price = form.save(commit=False)
msg = "{'quantity': ['This field cannot be null.']}"
with self.assertRaisesMessage(ValidationError, msg):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
fields = ("price",)
form = PriceFormWithoutQuantity({"price": "6.00"})
self.assertTrue(form.is_valid())
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal("6.00"))
self.assertIsNone(form.instance.quantity)
self.assertIsNone(form.instance.pk)
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
"""Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ["name", "url"]
exclude = ["url"]
self.assertEqual(list(ConfusedForm.base_fields), ["name"])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
"""Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = "__all__"
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
[
"headline",
"slug",
"pub_date",
"writer",
"article",
"categories",
"status",
],
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
[
"headline",
"slug",
"pub_date",
"writer",
"article",
"categories",
"status",
],
)
def test_bad_form(self):
# First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
[
"headline",
"slug",
"pub_date",
"writer",
"article",
"categories",
"status",
],
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
msg = "ModelForm has no model class specified."
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm()
# Even if you provide a model instance
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm(instance=Category)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
"""Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields), ["name", "slug", "url"])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
fields = "__all__"
class SubclassMeta(SomeCategoryForm):
"""We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ["url"]
self.assertHTMLEqual(
str(SubclassMeta()),
'<div><label for="id_name">Name:</label>'
'<input type="text" name="name" maxlength="20" required id="id_name">'
'</div><div><label for="id_slug">Slug:</label><input type="text" '
'name="slug" maxlength="20" required id="id_slug"></div><div>'
'<label for="id_checkbox">Checkbox:</label>'
'<input type="checkbox" name="checkbox" required id="id_checkbox"></div>',
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ["url", "name"]
self.assertEqual(list(OrderFields.base_fields), ["url", "name"])
self.assertHTMLEqual(
str(OrderFields()),
'<div><label for="id_url">The URL:</label>'
'<input type="text" name="url" maxlength="40" required id="id_url">'
'</div><div><label for="id_name">Name:</label><input type="text" '
'name="name" maxlength="20" required id="id_name"></div>',
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ["slug", "url", "name"]
exclude = ["url"]
self.assertEqual(list(OrderFields2.base_fields), ["slug", "name"])
def test_default_populated_on_optional_field(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(max_length=255, required=False)
class Meta:
model = PublicationDefaults
fields = ("mode",)
# Empty data uses the model field default.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, "di")
self.assertEqual(m1._meta.get_field("mode").get_default(), "di")
# Blank data doesn't use the model field default.
mf2 = PubForm({"mode": ""})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.mode, "")
def test_default_not_populated_on_non_empty_value_in_cleaned_data(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(max_length=255, required=False)
mocked_mode = None
def clean(self):
self.cleaned_data["mode"] = self.mocked_mode
return self.cleaned_data
class Meta:
model = PublicationDefaults
fields = ("mode",)
pub_form = PubForm({})
pub_form.mocked_mode = "de"
pub = pub_form.save(commit=False)
self.assertEqual(pub.mode, "de")
# Default should be populated on an empty value in cleaned_data.
default_mode = "di"
for empty_value in pub_form.fields["mode"].empty_values:
with self.subTest(empty_value=empty_value):
pub_form = PubForm({})
pub_form.mocked_mode = empty_value
pub = pub_form.save(commit=False)
self.assertEqual(pub.mode, default_mode)
def test_default_not_populated_on_optional_checkbox_input(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ("active",)
# Empty data doesn't use the model default because CheckboxInput
# doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertIs(m1.active, False)
self.assertIsInstance(mf1.fields["active"].widget, forms.CheckboxInput)
self.assertIs(m1._meta.get_field("active").get_default(), True)
def test_default_not_populated_on_checkboxselectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple)
class Meta:
model = PublicationDefaults
fields = ("mode",)
# Empty data doesn't use the model default because an unchecked
# CheckboxSelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, "")
self.assertEqual(m1._meta.get_field("mode").get_default(), "di")
def test_default_not_populated_on_selectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.SelectMultiple)
class Meta:
model = PublicationDefaults
fields = ("mode",)
# Empty data doesn't use the model default because an unselected
# SelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, "")
self.assertEqual(m1._meta.get_field("mode").get_default(), "di")
def test_prefixed_form_with_default_field(self):
class PubForm(forms.ModelForm):
prefix = "form-prefix"
class Meta:
model = PublicationDefaults
fields = ("mode",)
mode = "de"
self.assertNotEqual(
mode, PublicationDefaults._meta.get_field("mode").get_default()
)
mf1 = PubForm({"form-prefix-mode": mode})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, mode)
def test_renderer_kwarg(self):
custom = object()
self.assertIs(ProductForm(renderer=custom).renderer, custom)
def test_default_splitdatetime_field(self):
class PubForm(forms.ModelForm):
datetime_published = forms.SplitDateTimeField(required=False)
class Meta:
model = PublicationDefaults
fields = ("datetime_published",)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1))
mf2 = PubForm(
{"datetime_published_0": "2010-01-01", "datetime_published_1": "0:00:00"}
)
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1))
def test_default_filefield(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ("file",)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.file.name, "default.txt")
mf2 = PubForm({}, {"file": SimpleUploadedFile("name", b"foo")})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.file.name, "name")
def test_default_selectdatewidget(self):
class PubForm(forms.ModelForm):
date_published = forms.DateField(
required=False, widget=forms.SelectDateWidget
)
class Meta:
model = PublicationDefaults
fields = ("date_published",)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.date_published, datetime.date.today())
mf2 = PubForm(
{
"date_published_year": "2010",
"date_published_month": "1",
"date_published_day": "1",
}
)
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
class FieldOverridesByFormMetaForm(forms.ModelForm):
class Meta:
model = Category
fields = ["name", "url", "slug"]
widgets = {
"name": forms.Textarea,
"url": forms.TextInput(attrs={"class": "url"}),
}
labels = {
"name": "Title",
}
help_texts = {
"slug": "Watch out! Letters, numbers, underscores and hyphens only.",
}
error_messages = {
"slug": {
"invalid": (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
field_classes = {
"url": forms.URLField,
}
class TestFieldOverridesByFormMeta(SimpleTestCase):
def test_widget_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form["name"]),
'<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" '
"required></textarea>",
)
self.assertHTMLEqual(
str(form["url"]),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" '
"required>",
)
self.assertHTMLEqual(
str(form["slug"]),
'<input id="id_slug" type="text" name="slug" maxlength="20" required>',
)
def test_label_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form["name"].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form["url"].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form["slug"].label_tag()),
'<label for="id_slug">Slug:</label>',
)
self.assertHTMLEqual(
form["name"].legend_tag(),
'<legend for="id_name">Title:</legend>',
)
self.assertHTMLEqual(
form["url"].legend_tag(),
'<legend for="id_url">The URL:</legend>',
)
self.assertHTMLEqual(
form["slug"].legend_tag(),
'<legend for="id_slug">Slug:</legend>',
)
def test_help_text_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertEqual(
form["slug"].help_text,
"Watch out! Letters, numbers, underscores and hyphens only.",
)
def test_error_messages_overrides(self):
form = FieldOverridesByFormMetaForm(
data={
"name": "Category",
"url": "http://www.example.com/category/",
"slug": "!%#*@",
}
)
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {"slug": error})
def test_field_type_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertIs(Category._meta.get_field("url").__class__, models.CharField)
self.assertIsInstance(form.fields["url"], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ("name", "slug")
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ["url"]
model = Category
class ValidationTest(SimpleTestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(
data={"name": "some name", "slug": "some-slug"}
)
self.assertIs(form.is_valid(), True)
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(
data={"name": "some name", "slug": "some-slug"}
)
self.assertIs(form.is_valid(), True)
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
self.assertIs(form.is_valid(), True)
class UniqueTest(TestCase):
"""
unique/unique_together validation.
"""
@classmethod
def setUpTestData(cls):
cls.writer = Writer.objects.create(name="Mike Royko")
def test_simple_unique(self):
form = ProductForm({"slug": "teddy-bear-blue"})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({"slug": "teddy-bear-blue"})
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["slug"], ["Product with this Slug already exists."]
)
form = ProductForm({"slug": "teddy-bear-blue"}, instance=obj)
self.assertTrue(form.is_valid())
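        # The instance=obj form passes because Model._perform_unique_checks()
        # excludes the current row (roughly qs.exclude(pk=self.pk)) when
        # checking against existing data, so an object never conflicts with
        # itself.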
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({"price": "6.00", "quantity": "1"})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({"price": "6.00", "quantity": "1"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["__all__"],
["Price with this Price and Quantity already exists."],
)
def test_unique_together_exclusion(self):
"""
Forms don't validate unique_together constraints when only part of the
constraint is included in the form's fields. This allows using
form.save(commit=False) and then assigning the missing field(s) to the
model instance.
"""
class BookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = ("isbn", "suffix1")
# The unique_together is on suffix1/suffix2 but only suffix1 is part
# of the form. The fields must have defaults, otherwise they'll be
# skipped by other logic.
self.assertEqual(DerivedBook._meta.unique_together, (("suffix1", "suffix2"),))
for name in ("suffix1", "suffix2"):
with self.subTest(name=name):
field = DerivedBook._meta.get_field(name)
self.assertEqual(field.default, 0)
# The form fails validation with "Derived book with this Suffix1 and
# Suffix2 already exists." if the unique_together validation isn't
# skipped.
DerivedBook.objects.create(isbn="12345")
form = BookForm({"isbn": "56789", "suffix1": "0"})
self.assertTrue(form.is_valid(), form.errors)
def test_multiple_field_unique_together(self):
"""
When the same field is involved in multiple unique_together
constraints, we need to make sure we don't remove the data for it
before doing all the validation checking (not just failing after
the first one).
"""
class TripleForm(forms.ModelForm):
class Meta:
model = Triple
fields = "__all__"
Triple.objects.create(left=1, middle=2, right=3)
form = TripleForm({"left": "1", "middle": "2", "right": "3"})
self.assertFalse(form.is_valid())
form = TripleForm({"left": "1", "middle": "3", "right": "1"})
self.assertTrue(form.is_valid())
@skipUnlessDBFeature("supports_nullable_unique_constraints")
def test_unique_null(self):
title = "I May Be Wrong But I Doubt It"
form = BookForm({"title": title, "author": self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({"title": title, "author": self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["__all__"], ["Book with this Title and Author already exists."]
)
form = BookForm({"title": title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({"title": title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = "Boss"
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm(
{
"title": "Other",
"author": self.writer.pk,
"special_id": "1",
"isbn": "12345",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["special_id"], ["Book with this Special id already exists."]
)
def test_inherited_unique_together(self):
title = "Boss"
form = BookForm({"title": title, "author": self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm(
{"title": title, "author": self.writer.pk, "isbn": "12345"}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["__all__"], ["Book with this Title and Author already exists."]
)
def test_abstract_inherited_unique(self):
title = "Boss"
isbn = "12345"
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm(
{
"title": "Other",
"author": self.writer.pk,
"isbn": isbn,
"suffix1": "1",
"suffix2": "2",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["isbn"], ["Derived book with this Isbn already exists."]
)
def test_abstract_inherited_unique_together(self):
title = "Boss"
isbn = "12345"
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm(
{
"title": "Other",
"author": self.writer.pk,
"isbn": "9876",
"suffix1": "0",
"suffix2": "0",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["__all__"],
["Derived book with this Suffix1 and Suffix2 already exists."],
)
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({"key": "", "desc": ""})
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({"key": "key1", "desc": ""})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({"key": "key1", "desc": ""})
self.assertFalse(form.is_valid())
if connection.features.interprets_empty_strings_as_nulls:
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["key"], ["Explicit pk with this Key already exists."]
)
else:
self.assertEqual(len(form.errors), 3)
self.assertEqual(
form.errors["__all__"],
["Explicit pk with this Key and Desc already exists."],
)
self.assertEqual(
form.errors["desc"], ["Explicit pk with this Desc already exists."]
)
self.assertEqual(
form.errors["key"], ["Explicit pk with this Key already exists."]
)
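        # On backends where interprets_empty_strings_as_nulls is True
        # (Oracle), "" is stored as NULL and the unique checks skip empty
        # lookup values entirely, so only the non-blank `key` reports a
        # duplicate -- hence the two branches above.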
def test_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released",
slug="Django 1.0",
subtitle="Finally",
posted=datetime.date(2008, 9, 3),
)
form = PostForm({"title": "Django 1.0 is released", "posted": "2008-09-03"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["title"], ["Title must be unique for Posted date."]
)
form = PostForm({"title": "Work on Django 1.1 begins", "posted": "2008-09-03"})
self.assertTrue(form.is_valid())
form = PostForm({"title": "Django 1.0 is released", "posted": "2008-09-04"})
self.assertTrue(form.is_valid())
form = PostForm({"slug": "Django 1.0", "posted": "2008-01-01"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors["slug"], ["Slug must be unique for Posted year."])
form = PostForm({"subtitle": "Finally", "posted": "2008-09-30"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["subtitle"], ["Subtitle must be unique for Posted month."]
)
data = {
"subtitle": "Finally",
"title": "Django 1.0 is released",
"slug": "Django 1.0",
"posted": "2008-09-03",
}
form = PostForm(data, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({"title": "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors["posted"], ["This field is required."])
def test_unique_for_date_in_exclude(self):
"""
If the date for unique_for_* constraints is excluded from the
        ModelForm (in this case 'posted' has editable=False), then the
        constraint should be ignored.
"""
class DateTimePostForm(forms.ModelForm):
class Meta:
model = DateTimePost
fields = "__all__"
DateTimePost.objects.create(
title="Django 1.0 is released",
slug="Django 1.0",
subtitle="Finally",
posted=datetime.datetime(2008, 9, 3, 10, 10, 1),
)
# 'title' has unique_for_date='posted'
form = DateTimePostForm(
{"title": "Django 1.0 is released", "posted": "2008-09-03"}
)
self.assertTrue(form.is_valid())
# 'slug' has unique_for_year='posted'
form = DateTimePostForm({"slug": "Django 1.0", "posted": "2008-01-01"})
self.assertTrue(form.is_valid())
# 'subtitle' has unique_for_month='posted'
form = DateTimePostForm({"subtitle": "Finally", "posted": "2008-09-30"})
self.assertTrue(form.is_valid())
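        # unique_for_date/_year/_month checks are collected by
        # Model._get_unique_checks(exclude=...). The ModelForm puts fields
        # that aren't on the form (here the non-editable 'posted') into that
        # exclude list, and date checks whose date field is excluded are
        # dropped, so all three forms above validate.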
def test_inherited_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released",
slug="Django 1.0",
subtitle="Finally",
posted=datetime.date(2008, 9, 3),
)
form = DerivedPostForm(
{"title": "Django 1.0 is released", "posted": "2008-09-03"}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["title"], ["Title must be unique for Posted date."]
)
form = DerivedPostForm(
{"title": "Work on Django 1.1 begins", "posted": "2008-09-03"}
)
self.assertTrue(form.is_valid())
form = DerivedPostForm(
{"title": "Django 1.0 is released", "posted": "2008-09-04"}
)
self.assertTrue(form.is_valid())
form = DerivedPostForm({"slug": "Django 1.0", "posted": "2008-01-01"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors["slug"], ["Slug must be unique for Posted year."])
form = DerivedPostForm({"subtitle": "Finally", "posted": "2008-09-30"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["subtitle"], ["Subtitle must be unique for Posted month."]
)
data = {
"subtitle": "Finally",
"title": "Django 1.0 is released",
"slug": "Django 1.0",
"posted": "2008-09-03",
}
form = DerivedPostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
fields = "__all__"
p = FlexibleDatePost.objects.create(
title="Django 1.0 is released",
slug="Django 1.0",
subtitle="Finally",
posted=datetime.date(2008, 9, 3),
)
form = FlexDatePostForm({"title": "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({"slug": "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({"subtitle": "Finally"})
self.assertTrue(form.is_valid())
data = {
"subtitle": "Finally",
"title": "Django 1.0 is released",
"slug": "Django 1.0",
}
form = FlexDatePostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_override_unique_message(self):
class CustomProductForm(ProductForm):
class Meta(ProductForm.Meta):
error_messages = {
"slug": {
"unique": "%(model_name)s's %(field_label)s not unique.",
}
}
Product.objects.create(slug="teddy-bear-blue")
form = CustomProductForm({"slug": "teddy-bear-blue"})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors["slug"], ["Product's Slug not unique."])
def test_override_unique_together_message(self):
class CustomPriceForm(PriceForm):
class Meta(PriceForm.Meta):
error_messages = {
NON_FIELD_ERRORS: {
"unique_together": (
"%(model_name)s's %(field_labels)s not unique."
),
}
}
Price.objects.create(price=6.00, quantity=1)
form = CustomPriceForm({"price": "6.00", "quantity": "1"})
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."]
)
def test_override_unique_for_date_message(self):
class CustomPostForm(PostForm):
class Meta(PostForm.Meta):
error_messages = {
"title": {
"unique_for_date": (
"%(model_name)s's %(field_label)s not unique "
"for %(date_field_label)s date."
),
}
}
Post.objects.create(
title="Django 1.0 is released",
slug="Django 1.0",
subtitle="Finally",
posted=datetime.date(2008, 9, 3),
)
form = CustomPostForm(
{"title": "Django 1.0 is released", "posted": "2008-09-03"}
)
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors["title"], ["Post's Title not unique for Posted date."]
)
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment"
)
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test"
)
self.c3 = Category.objects.create(
name="Third test", slug="third-test", url="third"
)
self.w_royko = Writer.objects.create(name="Mike Royko")
self.w_woodward = Writer.objects.create(name="Bob Woodward")
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
'<div><label for="id_name">Name:</label><input type="text" name="name" '
'maxlength="20" required id="id_name"></div><div><label for="id_slug">Slug:'
'</label><input type="text" name="slug" maxlength="20" required '
'id="id_slug"></div><div><label for="id_url">The URL:</label>'
'<input type="text" name="url" maxlength="40" required id="id_url"></div>',
)
self.assertHTMLEqual(
str(f.as_ul()),
"""
<li><label for="id_name">Name:</label>
<input id="id_name" type="text" name="name" maxlength="20" required></li>
<li><label for="id_slug">Slug:</label>
<input id="id_slug" type="text" name="slug" maxlength="20" required></li>
<li><label for="id_url">The URL:</label>
<input id="id_url" type="text" name="url" maxlength="40" required></li>
""",
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" required>""",
)
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" required></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required></li>
<li>The URL: <input type="text" name="url" maxlength="40" required></li>""",
)
def test_initial_values(self):
self.create_basic_data()
# Initial values can be provided for model forms
f = ArticleForm(
auto_id=False,
initial={
"headline": "Your headline here",
"categories": [str(self.c1.id), str(self.c2.id)],
},
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="Your headline here" maxlength="50"
required>
</li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
f = RoykoForm(auto_id=False, instance=self.w_royko)
self.assertHTMLEqual(
str(f),
'<div>Name:<div class="helptext">Use both first and last names.</div>'
'<input type="text" name="name" value="Mike Royko" maxlength="50" '
"required></div>",
)
art = Article.objects.create(
headline="Test article",
slug="test-article",
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
article="Hello.",
)
art_id_1 = art.id
f = ArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="Test article" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="test-article" maxlength="50" required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
f = ArticleForm(
{
"headline": "Test headline",
"slug": "test-headline",
"pub_date": "1984-02-06",
"writer": str(self.w_royko.pk),
"article": "Hello.",
},
instance=art,
)
self.assertEqual(f.errors, {})
self.assertTrue(f.is_valid())
test_art = f.save()
self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, "Test headline")
def test_m2m_initial_callable(self):
"""
A callable can be provided as the initial value for an m2m field.
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == "categories":
kwargs["initial"] = lambda: Category.objects.order_by("name")[:2]
return db_field.formfield(**kwargs)
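# The callable is evaluated lazily, when the form's initial data is
# accessed (typically at render time), so it reflects the database state
# at that moment rather than at factory time.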
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(
Article,
fields=["headline", "categories"],
formfield_callback=formfield_for_dbfield,
)
form = ModelForm()
self.assertHTMLEqual(
form.as_ul(),
"""<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required></li>
<li><label for="id_categories">Categories:</label>
<select multiple name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It's a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk),
)
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm(
{
"name": "Entertainment",
"slug": "entertainment",
"url": "entertainment",
}
)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["name"], "Entertainment")
self.assertEqual(f.cleaned_data["slug"], "entertainment")
self.assertEqual(f.cleaned_data["url"], "entertainment")
c1 = f.save()
# Check that the same object is returned from the ORM (not the fastest
# way to verify this).
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm(
{"name": "Third test", "slug": "third-test", "url": "third"}
)
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({"name": "", "slug": "not a slug!", "url": "foo"})
self.assertEqual(f.errors["name"], ["This field is required."])
self.assertEqual(
f.errors["slug"],
[
"Enter a valid “slug” consisting of letters, numbers, underscores or "
"hyphens."
],
)
self.assertEqual(f.cleaned_data, {"url": "foo"})
msg = "The Category could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
f.save()
f = BaseCategoryForm({"name": "", "slug": "", "url": "foo"})
with self.assertRaisesMessage(ValueError, msg):
f.save()
def test_multi_fields(self):
self.create_basic_data()
self.maxDiff = None
# ManyToManyFields are represented by a ModelMultipleChoiceField, ForeignKeys
# by a ModelChoiceField, and any fields with the 'choices' attribute by a
# ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
"""
<div>Headline:
<input type="text" name="headline" maxlength="50" required>
</div>
<div>Slug:
<input type="text" name="slug" maxlength="50" required>
</div>
<div>Pub date:
<input type="text" name="pub_date" required>
</div>
<div>Writer:
<select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select>
</div>
<div>Article:
<textarea name="article" cols="40" rows="10" required></textarea>
</div>
<div>Categories:
<select name="categories" multiple>
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select>
</div>
<div>Status:
<select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option><option value="2">Pending</option>
<option value="3">Live</option>
</select>
</div>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
# Add some categories and test the many-to-many form output.
new_art = Article.objects.create(
article="Hello.",
headline="New headline",
slug="new-headline",
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
)
new_art.categories.add(Category.objects.get(name="Entertainment"))
self.assertSequenceEqual(new_art.categories.all(), [self.c1])
f = ArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="New headline" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="new-headline" maxlength="50" required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
def test_subset_fields(self):
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "pub_date")
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'<div>Headline:<input type="text" name="headline" maxlength="50" required>'
'</div><div>Pub date:<input type="text" name="pub_date" required></div>',
)
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "slug", "pub_date")
w_royko = Writer.objects.create(name="Mike Royko")
art = Article.objects.create(
article="Hello.",
headline="New headline",
slug="new-headline",
pub_date=datetime.date(1988, 1, 4),
writer=w_royko,
)
f = PartialArticleFormWithSlug(
{
"headline": "New headline",
"slug": "new-headline",
"pub_date": "1988-01-04",
},
auto_id=False,
instance=art,
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="New headline" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="new-headline" maxlength="50"
required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
""",
)
self.assertTrue(f.is_valid())
new_art = f.save()
self.assertEqual(new_art.id, art.id)
new_art = Article.objects.get(id=art.id)
self.assertEqual(new_art.headline, "New headline")
def test_m2m_editing(self):
self.create_basic_data()
form_data = {
"headline": "New headline",
"slug": "new-headline",
"pub_date": "1988-01-04",
"writer": str(self.w_royko.pk),
"article": "Hello.",
"categories": [str(self.c1.id), str(self.c2.id)],
}
# Create a new article, with categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
new_art = Article.objects.get(id=new_art.id)
art_id_1 = new_art.id
self.assertSequenceEqual(
new_art.categories.order_by("name"), [self.c1, self.c2]
)
# Now, submit form data with no categories. This deletes the existing
# categories.
form_data["categories"] = []
f = ArticleForm(form_data, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertSequenceEqual(new_art.categories.all(), [])
# Create a new article, with no categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
art_id_2 = new_art.id
self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertSequenceEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
form_data["categories"] = [str(self.c1.id), str(self.c2.id)]
f = ArticleForm(form_data)
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_3 = new_art.id
self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_3)
self.assertSequenceEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertSequenceEqual(
new_art.categories.order_by("name"), [self.c1, self.c2]
)
def test_custom_form_fields(self):
# Here, we define a custom ModelForm. Because it happens to have the
# same fields as the Category model, we can just call the form's save()
# to apply its changes to an existing Category instance.
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = "__all__"
cat = Category.objects.create(name="Third test")
form = ShortCategory(
{"name": "Third", "slug": "third", "url": "3rd"}, instance=cat
)
self.assertEqual(form.save().name, "Third")
self.assertEqual(Category.objects.get(id=cat.id).name, "Third")
def test_runtime_choicefield_populated(self):
self.maxDiff = None
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
self.create_basic_data()
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
'<li>Headline: <input type="text" name="headline" maxlength="50" required>'
"</li>"
'<li>Slug: <input type="text" name="slug" maxlength="50" required></li>'
'<li>Pub date: <input type="text" name="pub_date" required></li>'
'<li>Writer: <select name="writer" required>'
'<option value="" selected>---------</option>'
'<option value="%s">Bob Woodward</option>'
'<option value="%s">Mike Royko</option>'
"</select></li>"
'<li>Article: <textarea rows="10" cols="40" name="article" required>'
"</textarea></li>"
'<li>Categories: <select multiple name="categories">'
'<option value="%s">Entertainment</option>'
'<option value="%s">It's a test</option>'
'<option value="%s">Third test</option>'
"</select> </li>"
'<li>Status: <select name="status">'
'<option value="" selected>---------</option>'
'<option value="1">Draft</option>'
'<option value="2">Pending</option>'
'<option value="3">Live</option>'
"</select></li>"
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
c4 = Category.objects.create(name="Fourth", url="4th")
w_bernstein = Writer.objects.create(name="Carl Bernstein")
self.assertHTMLEqual(
f.as_ul(),
'<li>Headline: <input type="text" name="headline" maxlength="50" required>'
"</li>"
'<li>Slug: <input type="text" name="slug" maxlength="50" required></li>'
'<li>Pub date: <input type="text" name="pub_date" required></li>'
'<li>Writer: <select name="writer" required>'
'<option value="" selected>---------</option>'
'<option value="%s">Bob Woodward</option>'
'<option value="%s">Carl Bernstein</option>'
'<option value="%s">Mike Royko</option>'
"</select></li>"
'<li>Article: <textarea rows="10" cols="40" name="article" required>'
"</textarea></li>"
'<li>Categories: <select multiple name="categories">'
'<option value="%s">Entertainment</option>'
'<option value="%s">It's a test</option>'
'<option value="%s">Third test</option>'
'<option value="%s">Fourth</option>'
"</select></li>"
'<li>Status: <select name="status">'
'<option value="" selected>---------</option>'
'<option value="1">Draft</option>'
'<option value="2">Pending</option>'
'<option value="3">Live</option>'
"</select></li>"
% (
self.w_woodward.pk,
w_bernstein.pk,
self.w_royko.pk,
self.c1.pk,
self.c2.pk,
self.c3.pk,
c4.pk,
),
)
def test_recleaning_model_form_instance(self):
"""
Re-cleaning an instance that was added via a ModelForm shouldn't raise
a pk uniqueness error.
"""
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = "__all__"
form = AuthorForm({"full_name": "Bob"})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = "Alice"
obj.full_clean()
def test_validate_foreign_key_uses_default_manager(self):
class MyForm(forms.ModelForm):
class Meta:
model = Article
fields = "__all__"
# Archived writers are filtered out by the default manager.
w = Writer.objects.create(name="Randy", archived=True)
data = {
"headline": "My Article",
"slug": "my-article",
"pub_date": datetime.date.today(),
"writer": w.pk,
"article": "lorem ipsum",
}
form = MyForm(data)
self.assertIs(form.is_valid(), False)
self.assertEqual(
form.errors,
{
"writer": [
"Select a valid choice. That choice is not one of the available "
"choices."
]
},
)
def test_validate_foreign_key_to_model_with_overridden_manager(self):
class MyForm(forms.ModelForm):
class Meta:
model = Article
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Allow archived authors.
self.fields["writer"].queryset = Writer._base_manager.all()
w = Writer.objects.create(name="Randy", archived=True)
data = {
"headline": "My Article",
"slug": "my-article",
"pub_date": datetime.date.today(),
"writer": w.pk,
"article": "lorem ipsum",
}
form = MyForm(data)
self.assertIs(form.is_valid(), True)
article = form.save()
self.assertEqual(article.writer, w)
class ModelMultipleChoiceFieldTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment"
)
cls.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test"
)
cls.c3 = Category.objects.create(name="Third", slug="third-test", url="third")
def test_model_multiple_choice_field(self):
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertCountEqual(
list(f.choices),
[
(self.c1.pk, "Entertainment"),
(self.c2.pk, "It's a test"),
(self.c3.pk, "Third"),
],
)
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertCountEqual(f.clean([self.c1.id]), [self.c1])
self.assertCountEqual(f.clean([self.c2.id]), [self.c2])
self.assertCountEqual(f.clean([str(self.c1.id)]), [self.c1])
self.assertCountEqual(
f.clean([str(self.c1.id), str(self.c2.id)]),
[self.c1, self.c2],
)
self.assertCountEqual(
f.clean([self.c1.id, str(self.c2.id)]),
[self.c1, self.c2],
)
self.assertCountEqual(
f.clean((self.c1.id, str(self.c2.id))),
[self.c1, self.c2],
)
with self.assertRaises(ValidationError):
f.clean(["0"])
with self.assertRaises(ValidationError):
f.clean("hello")
with self.assertRaises(ValidationError):
f.clean(["fail"])
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([["fail"]])
with self.assertRaises(ValidationError):
f.clean([{"foo": "bar"}])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves that clean() checks the database when it runs,
# rather than caching results at instantiation time.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
# a number that will not conflict.
c6 = Category.objects.create(id=1006, name="Sixth", url="6th")
self.assertCountEqual(f.clean([c6.id]), [c6])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves that clean() checks the database when it runs,
# rather than caching results at instantiation time.
Category.objects.get(url="6th").delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
def test_model_multiple_choice_required_false(self):
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(["0"])
with self.assertRaises(ValidationError):
f.clean([str(self.c3.id), "0"])
with self.assertRaises(ValidationError):
f.clean([str(self.c1.id), "0"])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name="Third")
self.assertCountEqual(
list(f.choices),
[(self.c1.pk, "Entertainment"), (self.c2.pk, "It's a test")],
)
self.assertSequenceEqual(f.clean([self.c2.id]), [self.c2])
with self.assertRaises(ValidationError):
f.clean([self.c3.id])
with self.assertRaises(ValidationError):
f.clean([str(self.c2.id), str(self.c3.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertCountEqual(
list(f.choices),
[
(self.c1.pk, "multicategory Entertainment"),
(self.c2.pk, "multicategory It's a test"),
(self.c3.pk, "multicategory Third"),
],
)
def test_model_multiple_choice_number_of_queries(self):
"""
ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156).
"""
persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
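# All submitted pks should be validated with a single query (a pk__in
# filter), so the cost stays constant as the selection grows.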
self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
def test_model_multiple_choice_run_validators(self):
"""
ModelMultipleChoiceField runs the given validators (#14144).
"""
for i in range(30):
Writer.objects.create(name="Person %s" % i)
self._validator_run = False
def my_validator(value):
self._validator_run = True
f = forms.ModelMultipleChoiceField(
queryset=Writer.objects.all(), validators=[my_validator]
)
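# The validator receives the final cleaned value (a queryset), so one
# clean() call is enough to trigger it.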
f.clean([p.pk for p in Writer.objects.all()[8:9]])
self.assertTrue(self._validator_run)
def test_model_multiple_choice_show_hidden_initial(self):
"""
Test support of show_hidden_initial by ModelMultipleChoiceField.
"""
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(
show_hidden_initial=True, queryset=Writer.objects.all()
)
person1 = Writer.objects.create(name="Person 1")
person2 = Writer.objects.create(name="Person 2")
form = WriterForm(
initial={"persons": [person1, person2]},
data={
"initial-persons": [str(person1.pk), str(person2.pk)],
"persons": [str(person1.pk), str(person2.pk)],
},
)
self.assertTrue(form.is_valid())
self.assertFalse(form.has_changed())
form = WriterForm(
initial={"persons": [person1, person2]},
data={
"initial-persons": [str(person1.pk), str(person2.pk)],
"persons": [str(person2.pk)],
},
)
self.assertTrue(form.is_valid())
self.assertTrue(form.has_changed())
def test_model_multiple_choice_field_22745(self):
"""
#22745 -- Make sure that ModelMultipleChoiceField with
CheckboxSelectMultiple widget doesn't produce unnecessary db queries
when accessing its BoundField's attrs.
"""
class ModelMultipleChoiceForm(forms.Form):
categories = forms.ModelMultipleChoiceField(
Category.objects.all(), widget=forms.CheckboxSelectMultiple
)
form = ModelMultipleChoiceForm()
field = form["categories"] # BoundField
template = Template("{{ field.name }}{{ field }}{{ field.help_text }}")
with self.assertNumQueries(1):
template.render(Context({"field": field}))
def test_show_hidden_initial_changed_queries_efficiently(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(
show_hidden_initial=True, queryset=Writer.objects.all()
)
writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
writer_pks = tuple(x.pk for x in writers)
form = WriterForm(data={"initial-persons": writer_pks})
with self.assertNumQueries(1):
self.assertTrue(form.has_changed())
def test_clean_does_deduplicate_values(self):
class PersonForm(forms.Form):
persons = forms.ModelMultipleChoiceField(queryset=Person.objects.all())
person1 = Person.objects.create(name="Person 1")
form = PersonForm(data={})
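# The 50 duplicate submissions should collapse into a single SQL parameter:
# clean() is expected to deduplicate the values (e.g. via a set) before
# building the IN clause.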
queryset = form.fields["persons"].clean([str(person1.pk)] * 50)
sql, params = queryset.query.sql_with_params()
self.assertEqual(len(params), 1)
def test_to_field_name_with_initial_data(self):
class ArticleCategoriesForm(forms.ModelForm):
categories = forms.ModelMultipleChoiceField(
Category.objects.all(), to_field_name="slug"
)
class Meta:
model = Article
fields = ["categories"]
article = Article.objects.create(
headline="Test article",
slug="test-article",
pub_date=datetime.date(1988, 1, 4),
writer=Writer.objects.create(name="Test writer"),
article="Hello.",
)
article.categories.add(self.c2, self.c3)
form = ArticleCategoriesForm(instance=article)
self.assertCountEqual(form["categories"].value(), [self.c2.slug, self.c3.slug])
class ModelOneToOneFieldTests(TestCase):
def test_modelform_onetoonefield(self):
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = "__all__"
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = "__all__"
self.assertEqual(list(ImprovedArticleForm.base_fields), ["article"])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
def test_modelform_subclassed_model(self):
class BetterWriterForm(forms.ModelForm):
class Meta:
# BetterWriter model is a subclass of Writer with an additional
# `score` field.
model = BetterWriter
fields = "__all__"
bw = BetterWriter.objects.create(name="Joe Better", score=10)
self.assertEqual(
sorted(model_to_dict(bw)), ["id", "name", "score", "writer_ptr"]
)
self.assertEqual(sorted(model_to_dict(bw, fields=[])), [])
self.assertEqual(
sorted(model_to_dict(bw, fields=["id", "name"])), ["id", "name"]
)
self.assertEqual(
sorted(model_to_dict(bw, exclude=[])), ["id", "name", "score", "writer_ptr"]
)
self.assertEqual(
sorted(model_to_dict(bw, exclude=["id", "name"])), ["score", "writer_ptr"]
)
form = BetterWriterForm({"name": "Some Name", "score": 12})
self.assertTrue(form.is_valid())
bw2 = form.save()
self.assertEqual(bw2.score, 12)
def test_onetoonefield(self):
class WriterProfileForm(forms.ModelForm):
class Meta:
# WriterProfile has a OneToOneField to Writer
model = WriterProfile
fields = "__all__"
self.w_royko = Writer.objects.create(name="Mike Royko")
self.w_woodward = Writer.objects.create(name="Bob Woodward")
form = WriterProfileForm()
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_writer">Writer:</label>
<select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" id="id_age" min="0" required></p>
"""
% (
self.w_woodward.pk,
self.w_royko.pk,
),
)
data = {
"writer": str(self.w_woodward.pk),
"age": "65",
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(str(instance), "Bob Woodward is 65")
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_writer">Writer:</label>
<select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required>
</p>"""
% (
self.w_woodward.pk,
self.w_royko.pk,
),
)
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ["publication", "full_name"]
publication = Publication.objects.create(
title="Pravda", date_published=datetime.date(1991, 8, 22)
)
author = Author.objects.create(publication=publication, full_name="John Doe")
form = AuthorForm({"publication": "", "full_name": "John Doe"}, instance=author)
self.assertTrue(form.is_valid())
self.assertIsNone(form.cleaned_data["publication"])
author = form.save()
# The author object returned from the form still holds the original
# publication object, so re-fetch the author from the database.
new_author = Author.objects.get(pk=author.pk)
self.assertIsNone(new_author.publication)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ["publication", "full_name"]
publication = Publication.objects.create(
title="Pravda", date_published=datetime.date(1991, 8, 22)
)
author = Author1.objects.create(publication=publication, full_name="John Doe")
form = AuthorForm({"publication": "", "full_name": "John Doe"}, instance=author)
self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
def test_clean_false(self):
"""
If the ``clean`` method on a non-required FileField receives False as
the data (meaning clear the field value), it returns False, regardless
of the value of ``initial``.
"""
f = forms.FileField(required=False)
self.assertIs(f.clean(False), False)
self.assertIs(f.clean(False, "initial"), False)
def test_clean_false_required(self):
"""
If the ``clean`` method on a required FileField receives False as the
data, it has the same effect as None: initial is returned if non-empty,
otherwise the validation catches the lack of a required value.
"""
f = forms.FileField(required=True)
self.assertEqual(f.clean(False, "initial"), "initial")
with self.assertRaises(ValidationError):
f.clean(False)
def test_full_clear(self):
"""
Integration happy-path test that a model FileField can actually be set
and cleared via a ModelForm.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = "__all__"
form = DocumentForm()
self.assertIn('name="myfile"', str(form))
self.assertNotIn("myfile-clear", str(form))
form = DocumentForm(
files={"myfile": SimpleUploadedFile("something.txt", b"content")}
)
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
self.assertEqual(doc.myfile.name, "something.txt")
form = DocumentForm(instance=doc)
self.assertIn("myfile-clear", str(form))
form = DocumentForm(instance=doc, data={"myfile-clear": "true"})
doc = form.save(commit=False)
self.assertFalse(doc.myfile)
def test_clear_and_file_contradiction(self):
"""
If the user submits a new file upload AND checks the clear checkbox,
they get a validation error, and the bound redisplay of the form still
includes the current file and the clear checkbox.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = "__all__"
form = DocumentForm(
files={"myfile": SimpleUploadedFile("something.txt", b"content")}
)
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
form = DocumentForm(
instance=doc,
files={"myfile": SimpleUploadedFile("something.txt", b"content")},
data={"myfile-clear": "true"},
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["myfile"],
["Please either submit a file or check the clear checkbox, not both."],
)
rendered = str(form)
self.assertIn("something.txt", rendered)
self.assertIn("myfile-clear", rendered)
def test_render_empty_file_field(self):
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = "__all__"
doc = Document.objects.create()
form = DocumentForm(instance=doc)
self.assertHTMLEqual(
str(form["myfile"]), '<input id="id_myfile" name="myfile" type="file">'
)
def test_file_field_data(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={"description": "Assistance"})
self.assertFalse(f.is_valid())
f = TextFileForm(data={"description": "Assistance"}, files={})
self.assertFalse(f.is_valid())
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={"description": "Assistance"},
files={"file": SimpleUploadedFile("test1.txt", b"hello world")},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data["file"]), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, "tests/test1.txt")
instance.file.delete()
# If the previous file has been deleted, the file name can be reused
f = TextFileForm(
data={"description": "Assistance"},
files={"file": SimpleUploadedFile("test1.txt", b"hello world")},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data["file"]), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, "tests/test1.txt")
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={"description": "Assistance"},
files={"file": SimpleUploadedFile("test-maxlength.txt", b"hello world")},
)
self.assertFalse(f.is_valid())
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm({"description": "Assistance"}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["file"].name, "tests/test1.txt")
instance = f.save()
self.assertEqual(instance.file.name, "tests/test1.txt")
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={"description": "Assistance"},
files={"file": SimpleUploadedFile("test2.txt", b"hello world")},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, "tests/test2.txt")
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_filefield_required_false(self):
# Test the non-required FileField
f = TextFileForm(data={"description": "Assistance"})
f.fields["file"].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, "")
f = TextFileForm(
data={"description": "Assistance"},
files={"file": SimpleUploadedFile("test3.txt", b"hello world")},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, "tests/test3.txt")
# The instance can be edited without re-uploading the file, and the
# existing file should be preserved.
f = TextFileForm({"description": "New Description"}, instance=instance)
f.fields["file"].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, "New Description")
self.assertEqual(instance.file.name, "tests/test3.txt")
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_custom_file_field_save(self):
"""
Regression for #11149: save_form_data should be called only once
"""
class CFFForm(forms.ModelForm):
class Meta:
model = CustomFF
fields = "__all__"
# It's enough that the form saves without error -- the custom save routine will
# generate an AssertionError if it is called more than once during save.
form = CFFForm(data={"f": None})
form.save()
def test_file_field_multiple_save(self):
"""
Simulate a file upload and check how many times Model.save() gets
called. Test for bug #639.
"""
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = "__all__"
# Grab an image for testing.
filename = os.path.join(os.path.dirname(__file__), "test.png")
with open(filename, "rb") as fp:
img = fp.read()
# Fake a POST QueryDict and FILES MultiValueDict.
data = {"title": "Testing"}
files = {"image": SimpleUploadedFile("test.png", img, "image/png")}
form = PhotoForm(data=data, files=files)
p = form.save()
try:
# Check the savecount stored on the object (see the model).
self.assertEqual(p._savecount, 1)
finally:
# Delete the "uploaded" file to avoid clogging /tmp.
p = Photo.objects.get()
p.image.delete(save=False)
def test_file_path_field_blank(self):
"""FilePathField(blank=True) includes the empty option."""
class FPForm(forms.ModelForm):
class Meta:
model = FilePathModel
fields = "__all__"
form = FPForm()
self.assertEqual(
[name for _, name in form["path"].field.choices], ["---------", "models.py"]
)
@skipUnless(test_images, "Pillow not installed")
def test_image_field(self):
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(__file__), "test.png"), "rb") as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(__file__), "test2.png"), "rb") as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={"description": "An image"},
files={"image": SimpleUploadedFile("test.png", image_data)},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data["image"]), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, "tests/test.png")
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={"description": "An image"},
files={"image": SimpleUploadedFile("test.png", image_data)},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data["image"]), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, "tests/test.png")
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the
# model. This will not save the image again, but leave it exactly as it
# is.
f = ImageFileForm(data={"description": "Look, it changed"}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["image"].name, "tests/test.png")
instance = f.save()
self.assertEqual(instance.image.name, "tests/test.png")
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={"description": "Changed it"},
files={"image": SimpleUploadedFile("test2.png", image_data2)},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "tests/test2.png")
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={"description": "Changed it"},
files={"image": SimpleUploadedFile("test2.png", image_data2)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "tests/test2.png")
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ""
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={"description": "Test"})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertIsNone(instance.width)
self.assertIsNone(instance.height)
f = OptionalImageFileForm(
data={"description": "And a final one"},
files={"image": SimpleUploadedFile("test3.png", image_data)},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "tests/test3.png")
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect
# the image or its width/height properties.
f = OptionalImageFileForm({"description": "New Description"}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, "New Description")
self.assertEqual(instance.image.name, "tests/test3.png")
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={"description": "And a final one"},
files={"image": SimpleUploadedFile("test4.png", image_data2)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "tests/test4.png")
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Callable upload_to behavior that's dependent on the value of another
# field in the model.
f = ImageFileForm(
data={"description": "And a final one", "path": "foo"},
files={"image": SimpleUploadedFile("test4.png", image_data)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "foo/test4.png")
instance.delete()
# Editing an instance that has an image without an extension shouldn't
# fail validation. First create:
f = NoExtensionImageFileForm(
data={"description": "An image"},
files={"image": SimpleUploadedFile("test.png", image_data)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, "tests/no_extension")
# Then edit:
f = NoExtensionImageFileForm(
data={"description": "Edited image"}, instance=instance
)
self.assertTrue(f.is_valid())
class ModelOtherFieldTests(SimpleTestCase):
def test_big_integer_field(self):
bif = BigIntForm({"biggie": "-9223372036854775808"})
self.assertTrue(bif.is_valid())
bif = BigIntForm({"biggie": "-9223372036854775809"})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{
"biggie": [
"Ensure this value is greater than or equal to "
"-9223372036854775808."
]
},
)
bif = BigIntForm({"biggie": "9223372036854775807"})
self.assertTrue(bif.is_valid())
bif = BigIntForm({"biggie": "9223372036854775808"})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{
"biggie": [
"Ensure this value is less than or equal to 9223372036854775807."
]
},
)
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = "__all__"
self.assertFalse(HomepageForm({"url": "foo"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://example"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://example."}).is_valid())
self.assertFalse(HomepageForm({"url": "http://com."}).is_valid())
self.assertTrue(HomepageForm({"url": "http://localhost"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://example.com"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com:8000"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com/test"}).is_valid())
self.assertTrue(
HomepageForm({"url": "http://www.example.com:8000/test"}).is_valid()
)
self.assertTrue(HomepageForm({"url": "http://example.com/foo/bar"}).is_valid())
def test_modelform_non_editable_field(self):
"""
When explicitly including a non-editable field in a ModelForm, the
error message should be explicit.
"""
# 'created', non-editable, is excluded by default
self.assertNotIn("created", ArticleForm().fields)
msg = (
"'created' cannot be specified for Article model form as it is a "
"non-editable field"
)
with self.assertRaisesMessage(FieldError, msg):
class InvalidArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "created")
def test_http_prefixing(self):
"""
If the http:// prefix is omitted on form input, the field adds it.
"""
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = "__all__"
form = HomepageForm({"url": "example.com"})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["url"], "http://example.com")
form = HomepageForm({"url": "example.com/test"})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["url"], "http://example.com/test")
class OtherModelFormTests(TestCase):
def test_media_on_modelform(self):
# Similar to a regular Form class, you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(
str(f.media),
'<link href="/some/form/css" media="all" rel="stylesheet">'
'<script src="/some/form/javascript"></script>',
)
def test_choices_type(self):
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields["status"].clean("42")
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields["status"].clean("z")
def test_prefetch_related_queryset(self):
"""
ModelChoiceField should respect a prefetch_related() on its queryset.
"""
blue = Colour.objects.create(name="blue")
red = Colour.objects.create(name="red")
multicolor_item = ColourfulItem.objects.create()
multicolor_item.colours.add(blue, red)
red_item = ColourfulItem.objects.create()
red_item.colours.add(red)
class ColorModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return ", ".join(c.name for c in obj.colours.all())
field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related("colours"))
with self.assertNumQueries(3): # would be 4 if prefetch is ignored
self.assertEqual(
tuple(field.choices),
(
("", "---------"),
(multicolor_item.pk, "blue, red"),
(red_item.pk, "red"),
),
)
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name="Apple")
pear = Inventory.objects.create(barcode=22, name="Pear")
core = Inventory.objects.create(barcode=87, name="Core", parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name="barcode")
self.assertEqual(
tuple(field.choices),
(("", "---------"), (86, "Apple"), (87, "Core"), (22, "Pear")),
)
form = InventoryForm(instance=core)
self.assertHTMLEqual(
str(form["parent"]),
"""<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>""",
)
data = model_to_dict(core)
data["parent"] = "22"
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, "Pear")
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ["description", "url"]
self.assertEqual(list(CategoryForm.base_fields), ["description", "url"])
self.assertHTMLEqual(
str(CategoryForm()),
'<div><label for="id_description">Description:</label><input type="text" '
'name="description" required id="id_description"></div><div>'
'<label for="id_url">The URL:</label><input type="text" name="url" '
'maxlength="40" required id="id_url"></div>',
)
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(
Inventory.objects.all(), to_field_name="barcode"
)
self.assertEqual(
tuple(field.choices), ((86, "Apple"), (87, "Core"), (22, "Pear"))
)
self.assertSequenceEqual(field.clean([86]), [apple])
form = SelectInventoryForm({"items": [87, 22]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data), 1)
self.assertSequenceEqual(form.cleaned_data["items"], [core, pear])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ["name"])
self.assertHTMLEqual(
str(CustomFieldForExclusionForm()),
'<div><label for="id_name">Name:</label><input type="text" '
'name="name" maxlength="10" required id="id_name"></div>',
)
def test_iterable_model_m2m(self):
class ColourfulItemForm(forms.ModelForm):
class Meta:
model = ColourfulItem
fields = "__all__"
colour = Colour.objects.create(name="Blue")
form = ColourfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""
<p>
<label for="id_name">Name:</label>
<input id="id_name" type="text" name="name" maxlength="50" required></p>
<p><label for="id_colours">Colours:</label>
<select multiple name="colours" id="id_colours" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>
"""
% {"blue_pk": colour.pk},
)
def test_callable_field_default(self):
class PublicationDefaultsForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ("title", "date_published", "mode", "category")
self.maxDiff = 2000
form = PublicationDefaultsForm()
today_str = str(datetime.date.today())
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_title">Title:</label>
<input id="id_title" maxlength="30" name="title" type="text" required>
</p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}"
required>
<input id="initial-id_date_published" name="initial-date_published"
type="hidden" value="{0}">
</p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected>direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di">
</p>
<p>
<label for="id_category">Category:</label>
<select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected>Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden"
value="3">
""".format(
today_str
),
)
empty_data = {
"title": "",
"date_published": today_str,
"initial-date_published": today_str,
"mode": "di",
"initial-mode": "di",
"category": "3",
"initial-category": "3",
}
bound_form = PublicationDefaultsForm(empty_data)
self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
def test_custom_error_messages(self):
data = {"name1": "@#$!!**@#$", "name2": "@#$!!**@#$"}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors["name1"]),
'<ul class="errorlist"><li>Form custom error message.</li></ul>',
)
self.assertHTMLEqual(
str(errors["name2"]),
'<ul class="errorlist"><li>Model custom error message.</li></ul>',
)
def test_model_clean_error_messages(self):
data = {"name1": "FORBIDDEN_VALUE", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors["name1"]),
'<ul class="errorlist"><li>Model.clean() error messages.</li></ul>',
)
data = {"name1": "FORBIDDEN_VALUE2", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors["name1"]),
'<ul class="errorlist">'
"<li>Model.clean() error messages (simpler syntax).</li></ul>",
)
data = {"name1": "GLOBAL_ERROR", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["__all__"], ["Global error message."])
class CustomCleanTests(TestCase):
def test_override_clean(self):
"""
Regression for #12596: Calling super from ModelForm.clean() should be
optional.
"""
class TripleFormWithCleanOverride(forms.ModelForm):
class Meta:
model = Triple
fields = "__all__"
def clean(self):
if not self.cleaned_data["left"] == self.cleaned_data["right"]:
raise ValidationError("Left and right should be equal")
return self.cleaned_data
form = TripleFormWithCleanOverride({"left": 1, "middle": 2, "right": 1})
self.assertTrue(form.is_valid())
# form.instance.left will be None if the instance was not constructed
# by form.full_clean().
self.assertEqual(form.instance.left, 1)
def test_model_form_clean_applies_to_model(self):
"""
Regression test for #12960. Make sure the cleaned_data returned from
ModelForm.clean() is applied to the model instance.
"""
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
def clean(self):
self.cleaned_data["name"] = self.cleaned_data["name"].upper()
return self.cleaned_data
data = {"name": "Test", "slug": "test", "url": "/test"}
form = CategoryForm(data)
category = form.save()
self.assertEqual(category.name, "TEST")
class ModelFormInheritanceTests(SimpleTestCase):
def test_form_subclass_inheritance(self):
class Form(forms.Form):
age = forms.IntegerField()
class ModelForm(forms.ModelForm, Form):
class Meta:
model = Writer
fields = "__all__"
self.assertEqual(list(ModelForm().fields), ["name", "age"])
def test_field_removal(self):
class ModelForm(forms.ModelForm):
class Meta:
model = Writer
fields = "__all__"
class Mixin:
age = None
class Form(forms.Form):
age = forms.IntegerField()
class Form2(forms.Form):
foo = forms.IntegerField()
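# Field resolution follows the MRO: a base that sets age = None removes
# the field contributed by any base that comes after it.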
self.assertEqual(list(ModelForm().fields), ["name"])
self.assertEqual(list(type("NewForm", (Mixin, Form), {})().fields), [])
self.assertEqual(
list(type("NewForm", (Form2, Mixin, Form), {})().fields), ["foo"]
)
self.assertEqual(
list(type("NewForm", (Mixin, ModelForm, Form), {})().fields), ["name"]
)
self.assertEqual(
list(type("NewForm", (ModelForm, Mixin, Form), {})().fields), ["name"]
)
self.assertEqual(
list(type("NewForm", (ModelForm, Form, Mixin), {})().fields),
["name", "age"],
)
self.assertEqual(
list(type("NewForm", (ModelForm, Form), {"age": None})().fields), ["name"]
)
def test_field_removal_name_clashes(self):
"""
Form fields can be removed in subclasses by setting them to None
(#22510).
"""
class MyForm(forms.ModelForm):
media = forms.CharField()
class Meta:
model = Writer
fields = "__all__"
class SubForm(MyForm):
media = None
self.assertIn("media", MyForm().fields)
self.assertNotIn("media", SubForm().fields)
self.assertTrue(hasattr(MyForm, "media"))
self.assertTrue(hasattr(SubForm, "media"))
class StumpJokeForm(forms.ModelForm):
class Meta:
model = StumpJoke
fields = "__all__"
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
class Meta:
model = StumpJoke
fields = ()
class LimitChoicesToTests(TestCase):
"""
Tests the functionality of ``limit_choices_to``.
"""
@classmethod
def setUpTestData(cls):
cls.threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
cls.marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
def test_limit_choices_to_callable_for_fk_rel(self):
"""
A ForeignKey can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(
stumpjokeform.fields["most_recently_fooled"].queryset, [self.threepwood]
)
def test_limit_choices_to_callable_for_m2m_rel(self):
"""
A ManyToManyField can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(
stumpjokeform.fields["most_recently_fooled"].queryset, [self.threepwood]
)
def test_custom_field_with_queryset_but_no_limit_choices_to(self):
"""
A custom field with a `queryset` attribute but no `limit_choices_to`
works (#23795).
"""
f = StumpJokeWithCustomFieldForm()
self.assertEqual(f.fields["custom"].queryset, 42)
def test_fields_for_model_applies_limit_choices_to(self):
fields = fields_for_model(StumpJoke, ["has_fooled_today"])
self.assertSequenceEqual(fields["has_fooled_today"].queryset, [self.threepwood])
def test_callable_called_each_time_form_is_instantiated(self):
field = StumpJokeForm.base_fields["most_recently_fooled"]
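# Each StumpJokeForm() instantiation should re-apply limit_choices_to to
# the field's queryset, so the callable's call count tracks the number of
# forms built.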
with mock.patch.object(field, "limit_choices_to") as today_callable_dict:
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 1)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 2)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 3)
@isolate_apps("model_forms")
def test_limit_choices_to_no_duplicates(self):
joke1 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.threepwood,
)
joke2 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.threepwood,
)
joke3 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.marley,
)
StumpJoke.objects.create(funny=False, most_recently_fooled=self.marley)
joke1.has_fooled_today.add(self.marley, self.threepwood)
joke2.has_fooled_today.add(self.marley)
joke3.has_fooled_today.add(self.marley, self.threepwood)
class CharacterDetails(models.Model):
character1 = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=models.Q(
jokes__funny=True,
jokes_today__funny=True,
),
related_name="details_fk_1",
)
character2 = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to={
"jokes__funny": True,
"jokes_today__funny": True,
},
related_name="details_fk_2",
)
character3 = models.ManyToManyField(
Character,
limit_choices_to=models.Q(
jokes__funny=True,
jokes_today__funny=True,
),
related_name="details_m2m_1",
)
class CharacterDetailsForm(forms.ModelForm):
class Meta:
model = CharacterDetails
fields = "__all__"
form = CharacterDetailsForm()
self.assertCountEqual(
form.fields["character1"].queryset,
[self.marley, self.threepwood],
)
self.assertCountEqual(
form.fields["character2"].queryset,
[self.marley, self.threepwood],
)
self.assertCountEqual(
form.fields["character3"].queryset,
[self.marley, self.threepwood],
)
def test_limit_choices_to_m2m_through(self):
class DiceForm(forms.ModelForm):
class Meta:
model = Dice
fields = ["numbers"]
Number.objects.create(value=0)
n1 = Number.objects.create(value=1)
n2 = Number.objects.create(value=2)
form = DiceForm()
self.assertCountEqual(form.fields["numbers"].queryset, [n1, n2])
class FormFieldCallbackTests(SimpleTestCase):
def test_baseform_with_widgets_in_meta(self):
"""
Using base forms with widgets defined in Meta should not raise errors.
"""
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {"name": widget}
fields = "__all__"
Form = modelform_factory(Person, form=BaseForm)
self.assertIsInstance(Form.base_fields["name"].widget, forms.Textarea)
def test_factory_with_widget_argument(self):
"""Regression for #15315: modelform_factory should accept widgets
argument
"""
widget = forms.Textarea()
# Without a widgets argument, the widget should not be set to Textarea.
Form = modelform_factory(Person, fields="__all__")
self.assertNotEqual(Form.base_fields["name"].widget.__class__, forms.Textarea)
# With a widgets argument, the widget should be set to Textarea.
Form = modelform_factory(Person, fields="__all__", widgets={"name": widget})
self.assertEqual(Form.base_fields["name"].widget.__class__, forms.Textarea)
def test_modelform_factory_without_fields(self):
"""Regression for #19733"""
message = (
"Calling modelform_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelform_factory(Person)
def test_modelform_factory_with_all_fields(self):
"""Regression for #19733"""
form = modelform_factory(Person, fields="__all__")
self.assertEqual(list(form.base_fields), ["name"])
def test_custom_callback(self):
"""A custom formfield_callback is used if provided"""
callback_args = []
def callback(db_field, **kwargs):
callback_args.append((db_field, kwargs))
return db_field.formfield(**kwargs)
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {"name": widget}
fields = "__all__"
modelform_factory(Person, form=BaseForm, formfield_callback=callback)
id_field, name_field = Person._meta.fields
self.assertEqual(
callback_args, [(id_field, {}), (name_field, {"widget": widget})]
)
def test_bad_callback(self):
        # A bad callback provided by the user still raises an error.
with self.assertRaises(TypeError):
modelform_factory(
Person,
fields="__all__",
formfield_callback="not a function or callable",
)
def test_inherit_after_custom_callback(self):
def callback(db_field, **kwargs):
if isinstance(db_field, models.CharField):
return forms.CharField(widget=forms.Textarea)
return db_field.formfield(**kwargs)
class BaseForm(forms.ModelForm):
class Meta:
model = Person
fields = "__all__"
NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)
class InheritedForm(NewForm):
pass
for name in NewForm.base_fields:
self.assertEqual(
type(InheritedForm.base_fields[name].widget),
type(NewForm.base_fields[name].widget),
)
class LocalizedModelFormTest(TestCase):
def test_model_form_applies_localize_to_some_fields(self):
class PartiallyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = (
"left",
"right",
)
fields = "__all__"
f = PartiallyLocalizedTripleForm({"left": 10, "middle": 10, "right": 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields["left"].localize)
self.assertFalse(f.fields["middle"].localize)
self.assertTrue(f.fields["right"].localize)
def test_model_form_applies_localize_to_all_fields(self):
class FullyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "__all__"
fields = "__all__"
f = FullyLocalizedTripleForm({"left": 10, "middle": 10, "right": 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields["left"].localize)
self.assertTrue(f.fields["middle"].localize)
self.assertTrue(f.fields["right"].localize)
def test_model_form_refuses_arbitrary_string(self):
msg = (
"BrokenLocalizedTripleForm.Meta.localized_fields "
"cannot be a string. Did you mean to type: ('foo',)?"
)
with self.assertRaisesMessage(TypeError, msg):
class BrokenLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super().__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass):
pass
class CustomMetaclassTestCase(SimpleTestCase):
def test_modelform_factory_metaclass(self):
new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(SimpleTestCase):
"""
Should a model do anything special with __setattr__() or descriptors which
raise a ValidationError, a model form should catch the error (#24706).
"""
def test_setattr_raises_validation_error_field_specific(self):
"""
A model ValidationError using the dict form should put the error
message into the correct key of form.errors.
"""
form_class = modelform_factory(
model=StrictAssignmentFieldSpecific, fields=["title"]
)
form = form_class(data={"title": "testing setattr"}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"title": ["Cannot set attribute", "This field cannot be blank."]},
)
def test_setattr_raises_validation_error_non_field(self):
"""
A model ValidationError not using the dict form should put the error
message into __all__ (i.e. non-field errors) on the form.
"""
form_class = modelform_factory(model=StrictAssignmentAll, fields=["title"])
form = form_class(data={"title": "testing setattr"}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"__all__": ["Cannot set attribute"],
"title": ["This field cannot be blank."],
},
)
class ModelToDictTests(TestCase):
def test_many_to_many(self):
"""Data for a ManyToManyField is a list rather than a lazy QuerySet."""
blue = Colour.objects.create(name="blue")
red = Colour.objects.create(name="red")
item = ColourfulItem.objects.create()
item.colours.set([blue])
data = model_to_dict(item)["colours"]
self.assertEqual(data, [blue])
item.colours.set([red])
# If data were a QuerySet, it would be reevaluated here and give "red"
# instead of the original value.
self.assertEqual(data, [blue])
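# A hedged sketch of the snapshot semantics asserted above: model_to_dict()
# evaluates many-to-many values into a plain list at call time, so later
# changes to the relation don't leak into already-built initial data.
def _m2m_snapshot_sketch(item):
    initial = model_to_dict(item)
    return list(initial["colours"])  # already a list; copied defensively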
|
9c1420042e5dedc9807dc7d8b0332d1ca3b2015995e0c37ba383202cd4f42a9f | import re
from django.contrib.gis import forms
from django.contrib.gis.forms import BaseGeometryWidget, OpenLayersWidget
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase, override_settings
from django.utils.html import escape
class GeometryFieldTest(SimpleTestCase):
def test_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ("blah", 3, "FoO", None, 0):
with self.subTest(bad_default=bad_default):
with self.assertRaises(ValidationError):
fld.clean(bad_default)
def test_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean("POINT(5 23)")
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
# Different PROJ versions use different transformations, all are
# correct as having a 1 meter accuracy.
tol = 1
xform_geom = GEOSGeometry(
"POINT (951640.547328465 4219369.26171664)", srid=32140
)
# The cleaned geometry is transformed to 32140 (the widget map_srid is 3857).
cleaned_geom = fld.clean(
"SRID=3857;POINT (-10615777.40976205 3473169.895707852)"
)
self.assertEqual(cleaned_geom.srid, 32140)
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
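    # A hedged sketch of the reprojection the assertion above depends on:
    # clean() transforms input from the widget's map_srid (3857 here) to the
    # field's srid using GEOSGeometry.transform().
    def _srid_transform_sketch(self):
        geom = GEOSGeometry(
            "POINT (-10615777.40976205 3473169.895707852)", srid=3857
        )
        geom.transform(32140)  # in-place reprojection to the field SRID
        return geom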
def test_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
with self.assertRaisesMessage(ValidationError, "No geometry value provided."):
fld.clean(None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertIsNone(fld.clean(None))
def test_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in (
"POINT(5 23)",
"MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))",
"LINESTRING(0 0, 1 1)",
):
with self.subTest(wkt=wkt):
# to_python() uses the SRID of OpenLayersWidget if the
# converted value doesn't have an SRID.
self.assertEqual(
GEOSGeometry(wkt, srid=fld.widget.map_srid), fld.clean(wkt)
)
pnt_fld = forms.GeometryField(geom_type="POINT")
self.assertEqual(
GEOSGeometry("POINT(5 23)", srid=pnt_fld.widget.map_srid),
pnt_fld.clean("POINT(5 23)"),
)
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(
GEOSGeometry("LINESTRING(0 0, 1 1)", srid=pnt_fld.widget.map_srid),
pnt_fld.to_python("LINESTRING(0 0, 1 1)"),
)
# but rejected by `clean`
with self.assertRaises(ValidationError):
pnt_fld.clean("LINESTRING(0 0, 1 1)")
def test_to_python(self):
"""
to_python() either returns a correct GEOSGeometry object or
a ValidationError.
"""
good_inputs = [
"POINT(5 23)",
"MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))",
"LINESTRING(0 0, 1 1)",
]
bad_inputs = [
"POINT(5)",
"MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))",
"BLAH(0 0, 1 1)",
'{"type": "FeatureCollection", "features": ['
'{"geometry": {"type": "Point", "coordinates": [508375, 148905]}, '
'"type": "Feature"}]}',
]
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for geo_input in good_inputs:
with self.subTest(geo_input=geo_input):
self.assertEqual(
GEOSGeometry(geo_input, srid=fld.widget.map_srid),
fld.to_python(geo_input),
)
# but raises a ValidationError for any other string
for geo_input in bad_inputs:
with self.subTest(geo_input=geo_input):
with self.assertRaises(ValidationError):
fld.to_python(geo_input)
def test_to_python_different_map_srid(self):
f = forms.GeometryField(widget=OpenLayersWidget)
json = '{ "type": "Point", "coordinates": [ 5.0, 23.0 ] }'
self.assertEqual(
GEOSGeometry("POINT(5 23)", srid=f.widget.map_srid), f.to_python(json)
)
def test_field_with_text_widget(self):
class PointForm(forms.Form):
pt = forms.PointField(srid=4326, widget=forms.TextInput)
form = PointForm()
cleaned_pt = form.fields["pt"].clean("POINT(5 23)")
self.assertEqual(cleaned_pt, GEOSGeometry("POINT(5 23)", srid=4326))
self.assertEqual(4326, cleaned_pt.srid)
with self.assertRaisesMessage(ValidationError, "Invalid geometry value."):
form.fields["pt"].clean("POINT(5)")
point = GEOSGeometry("SRID=4326;POINT(5 23)")
form = PointForm(data={"pt": "POINT(5 23)"}, initial={"pt": point})
self.assertFalse(form.has_changed())
def test_field_string_value(self):
"""
Initialization of a geometry field with a valid/empty/invalid string.
Only the invalid string should trigger an error log entry.
"""
class PointForm(forms.Form):
pt1 = forms.PointField(srid=4326)
pt2 = forms.PointField(srid=4326)
pt3 = forms.PointField(srid=4326)
form = PointForm(
{
"pt1": "SRID=4326;POINT(7.3 44)", # valid
"pt2": "", # empty
"pt3": "PNT(0)", # invalid
}
)
with self.assertLogs("django.contrib.gis", "ERROR") as logger_calls:
output = str(form)
# The first point can't use assertInHTML() due to non-deterministic
# ordering of the rendered dictionary.
pt1_serialized = re.search(r"<textarea [^>]*>({[^<]+})<", output)[1]
        pt1_json = pt1_serialized.replace("&quot;", '"')
pt1_expected = GEOSGeometry(form.data["pt1"]).transform(3857, clone=True)
self.assertJSONEqual(pt1_json, pt1_expected.json)
self.assertInHTML(
'<textarea id="id_pt2" class="vSerializedField required" cols="150"'
' rows="10" name="pt2" hidden></textarea>',
output,
)
self.assertInHTML(
'<textarea id="id_pt3" class="vSerializedField required" cols="150"'
' rows="10" name="pt3" hidden></textarea>',
output,
)
# Only the invalid PNT(0) triggers an error log entry.
# Deserialization is called in form clean and in widget rendering.
self.assertEqual(len(logger_calls.records), 2)
self.assertEqual(
logger_calls.records[0].getMessage(),
"Error creating geometry from value 'PNT(0)' (String input "
"unrecognized as WKT EWKT, and HEXEWKB.)",
)
class SpecializedFieldTest(SimpleTestCase):
def setUp(self):
self.geometries = {
"point": GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
"multipoint": GEOSGeometry(
"SRID=4326;MULTIPOINT("
"(13.18634033203125 14.504356384277344),"
"(13.207969665527 14.490966796875),"
"(13.177070617675 14.454917907714))"
),
"linestring": GEOSGeometry(
"SRID=4326;LINESTRING("
"-8.26171875 -0.52734375,"
"-7.734375 4.21875,"
"6.85546875 3.779296875,"
"5.44921875 -3.515625)"
),
"multilinestring": GEOSGeometry(
"SRID=4326;MULTILINESTRING("
"(-16.435546875 -2.98828125,"
"-17.2265625 2.98828125,"
"-0.703125 3.515625,"
"-1.494140625 -3.33984375),"
"(-8.0859375 -5.9765625,"
"8.525390625 -8.7890625,"
"12.392578125 -0.87890625,"
"10.01953125 7.646484375))"
),
"polygon": GEOSGeometry(
"SRID=4326;POLYGON("
"(-1.669921875 6.240234375,"
"-3.8671875 -0.615234375,"
"5.9765625 -3.955078125,"
"18.193359375 3.955078125,"
"9.84375 9.4921875,"
"-1.669921875 6.240234375))"
),
"multipolygon": GEOSGeometry(
"SRID=4326;MULTIPOLYGON("
"((-17.578125 13.095703125,"
"-17.2265625 10.8984375,"
"-13.974609375 10.1953125,"
"-13.359375 12.744140625,"
"-15.732421875 13.7109375,"
"-17.578125 13.095703125)),"
"((-8.525390625 5.537109375,"
"-8.876953125 2.548828125,"
"-5.888671875 1.93359375,"
"-5.09765625 4.21875,"
"-6.064453125 6.240234375,"
"-8.525390625 5.537109375)))"
),
"geometrycollection": GEOSGeometry(
"SRID=4326;GEOMETRYCOLLECTION("
"POINT(5.625 -0.263671875),"
"POINT(6.767578125 -3.603515625),"
"POINT(8.525390625 0.087890625),"
"POINT(8.0859375 -2.13134765625),"
"LINESTRING("
"6.273193359375 -1.175537109375,"
"5.77880859375 -1.812744140625,"
"7.27294921875 -2.230224609375,"
"7.657470703125 -1.25244140625))"
),
}
def assertMapWidget(self, form_instance):
"""
        Make sure the MapWidget JS is passed in the form media and a
        MapWidget is actually created.
"""
self.assertTrue(form_instance.is_valid())
rendered = form_instance.as_p()
self.assertIn("new MapWidget(options);", rendered)
self.assertIn("map_srid: 3857,", rendered)
self.assertIn("gis/js/OLMapWidget.js", str(form_instance.media))
def assertTextarea(self, geom, rendered):
"""Makes sure the wkt and a textarea are in the content"""
self.assertIn("<textarea ", rendered)
self.assertIn("required", rendered)
ogr = geom.ogr
ogr.transform(3857)
self.assertIn(escape(ogr.json), rendered)
# map_srid in openlayers.html template must not be localized.
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_pointfield(self):
class PointForm(forms.Form):
p = forms.PointField()
geom = self.geometries["point"]
form = PointForm(data={"p": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
invalid = PointForm(data={"p": "some invalid geom"})
self.assertFalse(invalid.is_valid())
self.assertIn("Invalid geometry value", str(invalid.errors))
for invalid in [geo for key, geo in self.geometries.items() if key != "point"]:
self.assertFalse(PointForm(data={"p": invalid.wkt}).is_valid())
def test_multipointfield(self):
class PointForm(forms.Form):
p = forms.MultiPointField()
geom = self.geometries["multipoint"]
form = PointForm(data={"p": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "multipoint"
]:
self.assertFalse(PointForm(data={"p": invalid.wkt}).is_valid())
def test_linestringfield(self):
class LineStringForm(forms.Form):
f = forms.LineStringField()
geom = self.geometries["linestring"]
form = LineStringForm(data={"f": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "linestring"
]:
self.assertFalse(LineStringForm(data={"p": invalid.wkt}).is_valid())
def test_multilinestringfield(self):
class LineStringForm(forms.Form):
f = forms.MultiLineStringField()
geom = self.geometries["multilinestring"]
form = LineStringForm(data={"f": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "multilinestring"
]:
self.assertFalse(LineStringForm(data={"p": invalid.wkt}).is_valid())
def test_polygonfield(self):
class PolygonForm(forms.Form):
p = forms.PolygonField()
geom = self.geometries["polygon"]
form = PolygonForm(data={"p": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "polygon"
]:
self.assertFalse(PolygonForm(data={"p": invalid.wkt}).is_valid())
def test_multipolygonfield(self):
class PolygonForm(forms.Form):
p = forms.MultiPolygonField()
geom = self.geometries["multipolygon"]
form = PolygonForm(data={"p": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "multipolygon"
]:
self.assertFalse(PolygonForm(data={"p": invalid.wkt}).is_valid())
def test_geometrycollectionfield(self):
class GeometryForm(forms.Form):
g = forms.GeometryCollectionField()
geom = self.geometries["geometrycollection"]
form = GeometryForm(data={"g": geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(GeometryForm().is_valid())
for invalid in [
geo for key, geo in self.geometries.items() if key != "geometrycollection"
]:
self.assertFalse(GeometryForm(data={"g": invalid.wkt}).is_valid())
class OSMWidgetTest(SimpleTestCase):
def setUp(self):
self.geometries = {
"point": GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries["point"]
form = PointForm(data={"p": geom})
rendered = form.as_p()
self.assertIn("ol.source.OSM()", rendered)
self.assertIn("id: 'id_p',", rendered)
def test_default_lat_lon(self):
self.assertEqual(forms.OSMWidget.default_lon, 5)
self.assertEqual(forms.OSMWidget.default_lat, 47)
self.assertEqual(forms.OSMWidget.default_zoom, 12)
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(
attrs={
"default_lon": 20,
"default_lat": 30,
"default_zoom": 17,
}
),
)
form = PointForm()
rendered = form.as_p()
self.assertIn("options['default_lon'] = 20;", rendered)
self.assertIn("options['default_lat'] = 30;", rendered)
self.assertIn("options['default_zoom'] = 17;", rendered)
class GeometryWidgetTests(SimpleTestCase):
def test_get_context_attrs(self):
# The Widget.get_context() attrs argument overrides self.attrs.
widget = BaseGeometryWidget(attrs={"geom_type": "POINT"})
context = widget.get_context("point", None, attrs={"geom_type": "POINT2"})
self.assertEqual(context["geom_type"], "POINT2")
# Widget.get_context() returns expected name for geom_type.
widget = BaseGeometryWidget(attrs={"geom_type": "POLYGON"})
context = widget.get_context("polygon", None, None)
self.assertEqual(context["geom_type"], "Polygon")
# Widget.get_context() returns 'Geometry' instead of 'Unknown'.
widget = BaseGeometryWidget(attrs={"geom_type": "GEOMETRY"})
context = widget.get_context("geometry", None, None)
self.assertEqual(context["geom_type"], "Geometry")
def test_subwidgets(self):
widget = forms.BaseGeometryWidget()
self.assertEqual(
list(widget.subwidgets("name", "value")),
[
{
"is_hidden": False,
"attrs": {
"map_srid": 4326,
"map_width": 600,
"geom_type": "GEOMETRY",
"map_height": 400,
"display_raw": False,
},
"name": "name",
"template_name": "",
"value": "value",
"required": False,
}
],
)
def test_custom_serialization_widget(self):
class CustomGeometryWidget(forms.BaseGeometryWidget):
template_name = "gis/openlayers.html"
deserialize_called = 0
def serialize(self, value):
return value.json if value else ""
def deserialize(self, value):
self.deserialize_called += 1
return GEOSGeometry(value)
class PointForm(forms.Form):
p = forms.PointField(widget=CustomGeometryWidget)
point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
form = PointForm(data={"p": point})
self.assertIn(escape(point.json), form.as_p())
        CustomGeometryWidget.deserialize_called = 0  # reset the class counter
widget = form.fields["p"].widget
# Force deserialize use due to a string value
self.assertIn(escape(point.json), widget.render("p", point.json))
self.assertEqual(widget.deserialize_called, 1)
form = PointForm(data={"p": point.json})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["p"].srid, 4326)
|
14f05e6802b36fbd857cab519e45f086099f62c9baeab3279ef99ff1592fb38e | import datetime
import decimal
import gettext as gettext_module
import os
import pickle
import re
import tempfile
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from unittest import mock
from asgiref.local import Local
from django import forms
from django.apps import AppConfig
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.conf.urls.i18n import i18n_patterns
from django.template import Context, Template
from django.test import (
RequestFactory,
SimpleTestCase,
TestCase,
ignore_warnings,
override_settings,
)
from django.utils import translation
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.formats import (
date_format,
get_format,
iter_format_modules,
localize,
localize_input,
reset_format_cache,
sanitize_separators,
sanitize_strftime_format,
time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeString, mark_safe
from django.utils.translation import (
activate,
check_for_language,
deactivate,
get_language,
get_language_bidi,
get_language_from_request,
get_language_info,
gettext,
gettext_lazy,
ngettext,
ngettext_lazy,
npgettext,
npgettext_lazy,
pgettext,
round_away_from_one,
to_language,
to_locale,
trans_null,
trans_real,
)
from django.utils.translation.reloader import (
translation_file_changed,
watch_for_translation_changes,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
here = os.path.dirname(os.path.abspath(__file__))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, "other", "locale"),
]
class AppModuleStub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
class TranslationTests(SimpleTestCase):
@translation.override("fr")
def test_plural(self):
"""
Test plurals with ngettext. French differs from English in that 0 is singular.
"""
self.assertEqual(
ngettext("%(num)d year", "%(num)d years", 0) % {"num": 0},
"0 année",
)
self.assertEqual(
ngettext("%(num)d year", "%(num)d years", 2) % {"num": 2},
"2 années",
)
self.assertEqual(
ngettext("%(size)d byte", "%(size)d bytes", 0) % {"size": 0}, "0 octet"
)
self.assertEqual(
ngettext("%(size)d byte", "%(size)d bytes", 2) % {"size": 2}, "2 octets"
)
def test_plural_null(self):
g = trans_null.ngettext
self.assertEqual(g("%(num)d year", "%(num)d years", 0) % {"num": 0}, "0 years")
self.assertEqual(g("%(num)d year", "%(num)d years", 1) % {"num": 1}, "1 year")
self.assertEqual(g("%(num)d year", "%(num)d years", 2) % {"num": 2}, "2 years")
@override_settings(LOCALE_PATHS=extended_locale_paths)
@translation.override("fr")
def test_multiple_plurals_per_language(self):
"""
        Normally, French has 2 plurals. As other/locale/fr/LC_MESSAGES/django.po
        has a different plural equation with 3 plurals, this tests that those
        plurals are honored.
"""
self.assertEqual(ngettext("%d singular", "%d plural", 0) % 0, "0 pluriel1")
self.assertEqual(ngettext("%d singular", "%d plural", 1) % 1, "1 singulier")
self.assertEqual(ngettext("%d singular", "%d plural", 2) % 2, "2 pluriel2")
french = trans_real.catalog()
# Internal _catalog can query subcatalogs (from different po files).
self.assertEqual(french._catalog[("%d singular", 0)], "%d singulier")
self.assertEqual(french._catalog[("%(num)d hour", 0)], "%(num)d heure")
def test_override(self):
activate("de")
try:
with translation.override("pl"):
self.assertEqual(get_language(), "pl")
self.assertEqual(get_language(), "de")
with translation.override(None):
self.assertIsNone(get_language())
with translation.override("pl"):
pass
self.assertIsNone(get_language())
self.assertEqual(get_language(), "de")
finally:
deactivate()
def test_override_decorator(self):
@translation.override("pl")
def func_pl():
self.assertEqual(get_language(), "pl")
@translation.override(None)
def func_none():
self.assertIsNone(get_language())
try:
activate("de")
func_pl()
self.assertEqual(get_language(), "de")
func_none()
self.assertEqual(get_language(), "de")
finally:
deactivate()
def test_override_exit(self):
"""
The language restored is the one used when the function was
called, not the one used when the decorator was initialized (#23381).
"""
activate("fr")
@translation.override("pl")
def func_pl():
pass
deactivate()
try:
activate("en")
func_pl()
self.assertEqual(get_language(), "en")
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = gettext_lazy("Add %(name)s")
d = {"name": "Ringo"}
self.assertEqual("Add Ringo", s % d)
with translation.override("de", deactivate=True):
self.assertEqual("Ringo hinzuf\xfcgen", s % d)
with translation.override("pl"):
self.assertEqual("Dodaj Ringo", s % d)
# It should be possible to compare *_lazy objects.
s1 = gettext_lazy("Add %(name)s")
self.assertEqual(s, s1)
s2 = gettext_lazy("Add %(name)s")
s3 = gettext_lazy("Add %(name)s")
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = gettext_lazy("Some other string")
self.assertNotEqual(s, s4)
def test_lazy_pickle(self):
s1 = gettext_lazy("test")
self.assertEqual(str(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(str(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy(self):
simple_with_format = ngettext_lazy("%d good result", "%d good results")
simple_context_with_format = npgettext_lazy(
"Exclamation", "%d good result", "%d good results"
)
simple_without_format = ngettext_lazy("good result", "good results")
with translation.override("de"):
self.assertEqual(simple_with_format % 1, "1 gutes Resultat")
self.assertEqual(simple_with_format % 4, "4 guten Resultate")
self.assertEqual(simple_context_with_format % 1, "1 gutes Resultat!")
self.assertEqual(simple_context_with_format % 4, "4 guten Resultate!")
self.assertEqual(simple_without_format % 1, "gutes Resultat")
self.assertEqual(simple_without_format % 4, "guten Resultate")
complex_nonlazy = ngettext_lazy(
"Hi %(name)s, %(num)d good result", "Hi %(name)s, %(num)d good results", 4
)
complex_deferred = ngettext_lazy(
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
"num",
)
complex_context_nonlazy = npgettext_lazy(
"Greeting",
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
4,
)
complex_context_deferred = npgettext_lazy(
"Greeting",
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
"num",
)
with translation.override("de"):
self.assertEqual(
complex_nonlazy % {"num": 4, "name": "Jim"},
"Hallo Jim, 4 guten Resultate",
)
self.assertEqual(
complex_deferred % {"name": "Jim", "num": 1},
"Hallo Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_deferred % {"name": "Jim", "num": 5},
"Hallo Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_deferred % {"name": "Jim"}
self.assertEqual(
complex_context_nonlazy % {"num": 4, "name": "Jim"},
"Willkommen Jim, 4 guten Resultate",
)
self.assertEqual(
complex_context_deferred % {"name": "Jim", "num": 1},
"Willkommen Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_context_deferred % {"name": "Jim", "num": 5},
"Willkommen Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_context_deferred % {"name": "Jim"}
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy_format_style(self):
simple_with_format = ngettext_lazy("{} good result", "{} good results")
simple_context_with_format = npgettext_lazy(
"Exclamation", "{} good result", "{} good results"
)
with translation.override("de"):
self.assertEqual(simple_with_format.format(1), "1 gutes Resultat")
self.assertEqual(simple_with_format.format(4), "4 guten Resultate")
self.assertEqual(simple_context_with_format.format(1), "1 gutes Resultat!")
self.assertEqual(simple_context_with_format.format(4), "4 guten Resultate!")
complex_nonlazy = ngettext_lazy(
"Hi {name}, {num} good result", "Hi {name}, {num} good results", 4
)
complex_deferred = ngettext_lazy(
"Hi {name}, {num} good result", "Hi {name}, {num} good results", "num"
)
complex_context_nonlazy = npgettext_lazy(
"Greeting",
"Hi {name}, {num} good result",
"Hi {name}, {num} good results",
4,
)
complex_context_deferred = npgettext_lazy(
"Greeting",
"Hi {name}, {num} good result",
"Hi {name}, {num} good results",
"num",
)
with translation.override("de"):
self.assertEqual(
complex_nonlazy.format(num=4, name="Jim"),
"Hallo Jim, 4 guten Resultate",
)
self.assertEqual(
complex_deferred.format(name="Jim", num=1),
"Hallo Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_deferred.format(name="Jim", num=5),
"Hallo Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_deferred.format(name="Jim")
self.assertEqual(
complex_context_nonlazy.format(num=4, name="Jim"),
"Willkommen Jim, 4 guten Resultate",
)
self.assertEqual(
complex_context_deferred.format(name="Jim", num=1),
"Willkommen Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_context_deferred.format(name="Jim", num=5),
"Willkommen Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_context_deferred.format(name="Jim")
def test_ngettext_lazy_bool(self):
self.assertTrue(ngettext_lazy("%d good result", "%d good results"))
self.assertFalse(ngettext_lazy("", ""))
def test_ngettext_lazy_pickle(self):
s1 = ngettext_lazy("%d good result", "%d good results")
self.assertEqual(s1 % 1, "1 good result")
self.assertEqual(s1 % 8, "8 good results")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(s2 % 1, "1 good result")
self.assertEqual(s2 % 8, "8 good results")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = Local()
trans_real._translations = {}
with translation.override("de"):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(
npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate"
)
def test_empty_value(self):
"""Empty value must stay empty after being translated (#23196)."""
with translation.override("de"):
self.assertEqual("", gettext(""))
s = mark_safe("")
self.assertEqual(s, gettext(s))
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping with gettext or pgettext
shouldn't change the "safe" status.
"""
trans_real._active = Local()
trans_real._translations = {}
s1 = mark_safe("Password")
s2 = mark_safe("May")
with translation.override("de", deactivate=True):
self.assertIs(type(gettext(s1)), SafeString)
self.assertIs(type(pgettext("month name", s2)), SafeString)
self.assertEqual("aPassword", SafeString("a") + s1)
self.assertEqual("Passworda", s1 + SafeString("a"))
self.assertEqual("Passworda", s1 + mark_safe("a"))
self.assertEqual("aPassword", mark_safe("a") + s1)
self.assertEqual("as", mark_safe("a") + mark_safe("s"))
def test_maclines(self):
"""
        Translations in files with Mac or DOS line endings are converted
        to Unix EOLs in .po catalogs.
"""
ca_translation = trans_real.translation("ca")
ca_translation._catalog["Mac\nEOF\n"] = "Catalan Mac\nEOF\n"
ca_translation._catalog["Win\nEOF\n"] = "Catalan Win\nEOF\n"
with translation.override("ca", deactivate=True):
self.assertEqual("Catalan Mac\nEOF\n", gettext("Mac\rEOF\r"))
self.assertEqual("Catalan Win\nEOF\n", gettext("Win\r\nEOF\r\n"))
def test_to_locale(self):
tests = (
("en", "en"),
("EN", "en"),
("en-us", "en_US"),
("EN-US", "en_US"),
("en_US", "en_US"),
# With > 2 characters after the dash.
("sr-latn", "sr_Latn"),
("sr-LATN", "sr_Latn"),
("sr_Latn", "sr_Latn"),
# 3-char language codes.
("ber-MA", "ber_MA"),
("BER-MA", "ber_MA"),
("BER_MA", "ber_MA"),
("ber_MA", "ber_MA"),
# With private use subtag (x-informal).
("nl-nl-x-informal", "nl_NL-x-informal"),
("NL-NL-X-INFORMAL", "nl_NL-x-informal"),
("sr-latn-x-informal", "sr_Latn-x-informal"),
("SR-LATN-X-INFORMAL", "sr_Latn-x-informal"),
)
for lang, locale in tests:
with self.subTest(lang=lang):
self.assertEqual(to_locale(lang), locale)
def test_to_language(self):
self.assertEqual(to_language("en_US"), "en-us")
self.assertEqual(to_language("sr_Lat"), "sr-lat")
def test_language_bidi(self):
self.assertIs(get_language_bidi(), False)
with translation.override(None):
self.assertIs(get_language_bidi(), False)
def test_language_bidi_null(self):
self.assertIs(trans_null.get_language_bidi(), False)
with override_settings(LANGUAGE_CODE="he"):
self.assertIs(get_language_bidi(), True)
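# A hedged sketch of the check behind the bidi assertions above: Django
# compares the active language's base code against settings.LANGUAGES_BIDI
# (the trans_null variant uses settings.LANGUAGE_CODE directly).
def _is_bidi_sketch(lang_code):
    return lang_code.split("-")[0] in settings.LANGUAGES_BIDI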
class TranslationLoadingTests(SimpleTestCase):
def setUp(self):
"""Clear translation state."""
self._old_language = get_language()
self._old_translations = trans_real._translations
deactivate()
trans_real._translations = {}
def tearDown(self):
trans_real._translations = self._old_translations
activate(self._old_language)
@override_settings(
USE_I18N=True,
LANGUAGE_CODE="en",
LANGUAGES=[
("en", "English"),
("en-ca", "English (Canada)"),
("en-nz", "English (New Zealand)"),
("en-au", "English (Australia)"),
],
LOCALE_PATHS=[os.path.join(here, "loading")],
INSTALLED_APPS=["i18n.loading_app"],
)
def test_translation_loading(self):
"""
"loading_app" does not have translations for all languages provided by
"loading". Catalogs are merged correctly.
"""
tests = [
("en", "local country person"),
("en_AU", "aussie"),
("en_NZ", "kiwi"),
("en_CA", "canuck"),
]
# Load all relevant translations.
for language, _ in tests:
activate(language)
# Catalogs are merged correctly.
for language, nickname in tests:
with self.subTest(language=language):
activate(language)
self.assertEqual(gettext("local country person"), nickname)
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
        # Here we rely on .split() being called inside the _fetch()
        # in trans_real.translation().
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations["en-YY"] = None
return res
trans_real._translations = {sideeffect_str("en-XX"): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
# May raise RuntimeError if translation.activate() isn't thread-safe.
translation.activate("pl")
        # Make sure sideeffect_str actually added a new translation.
self.assertLess(translation_count, len(trans_real._translations))
class FormattingTests(SimpleTestCase):
def setUp(self):
super().setUp()
self.n = decimal.Decimal("66666.666")
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.long = 10000
self.ctxt = Context(
{
"n": self.n,
"t": self.t,
"d": self.d,
"dt": self.dt,
"f": self.f,
"l": self.long,
}
)
def test_all_format_strings(self):
all_locales = LANG_INFO.keys()
some_date = datetime.date(2017, 10, 14)
some_datetime = datetime.datetime(2017, 10, 14, 10, 23)
for locale in all_locales:
with self.subTest(locale=locale), translation.override(locale):
self.assertIn(
"2017", date_format(some_date)
) # Uses DATE_FORMAT by default
self.assertIn(
"23", time_format(some_datetime)
) # Uses TIME_FORMAT by default
self.assertIn(
"2017",
date_format(some_datetime, format=get_format("DATETIME_FORMAT")),
)
self.assertIn(
"2017",
date_format(some_date, format=get_format("YEAR_MONTH_FORMAT")),
)
self.assertIn(
"14", date_format(some_date, format=get_format("MONTH_DAY_FORMAT"))
)
self.assertIn(
"2017",
date_format(some_date, format=get_format("SHORT_DATE_FORMAT")),
)
self.assertIn(
"2017",
date_format(
some_datetime, format=get_format("SHORT_DATETIME_FORMAT")
),
)
def test_locale_independent(self):
"""
        Number formatting with explicit separator and grouping arguments,
        with and without USE_THOUSAND_SEPARATOR.
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual(
"66666.66",
nformat(
self.n, decimal_sep=".", decimal_pos=2, grouping=3, thousand_sep=","
),
)
self.assertEqual(
"66666A6",
nformat(
self.n, decimal_sep="A", decimal_pos=1, grouping=1, thousand_sep="B"
),
)
self.assertEqual(
"66666",
nformat(
self.n, decimal_sep="X", decimal_pos=0, grouping=1, thousand_sep="Y"
),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
"66,666.66",
nformat(
self.n, decimal_sep=".", decimal_pos=2, grouping=3, thousand_sep=","
),
)
self.assertEqual(
"6B6B6B6B6A6",
nformat(
self.n, decimal_sep="A", decimal_pos=1, grouping=1, thousand_sep="B"
),
)
self.assertEqual(
"-66666.6", nformat(-66666.666, decimal_sep=".", decimal_pos=1)
)
self.assertEqual(
"-66666.0", nformat(int("-66666"), decimal_sep=".", decimal_pos=1)
)
self.assertEqual(
"10000.0", nformat(self.long, decimal_sep=".", decimal_pos=1)
)
self.assertEqual(
"10,00,00,000.00",
nformat(
100000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(3, 2, 0),
thousand_sep=",",
),
)
self.assertEqual(
"1,0,00,000,0000.00",
nformat(
10000000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(4, 3, 2, 1, 0),
thousand_sep=",",
),
)
self.assertEqual(
"10000,00,000.00",
nformat(
1000000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(3, 2, -1),
thousand_sep=",",
),
)
# This unusual grouping/force_grouping combination may be triggered
# by the intcomma filter.
self.assertEqual(
"10000",
nformat(
self.long,
decimal_sep=".",
decimal_pos=0,
grouping=0,
force_grouping=True,
),
)
# date filter
self.assertEqual(
"31.12.2009 в 20:50",
Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt),
)
self.assertEqual(
"⌚ 10:15", Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt)
)
@ignore_warnings(category=RemovedInDjango50Warning)
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
        With localized formatting disabled (USE_L10N=False), Catalan
        translations are used, but localized formats are not.
"""
with translation.override("ca", deactivate=True):
self.maxDiff = 3000
self.assertEqual("N j, Y", get_format("DATE_FORMAT"))
self.assertEqual(0, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(".", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("10:15 a.m.", time_format(self.t))
self.assertEqual("Des. 31, 2009", date_format(self.d))
self.assertEqual("desembre 2009", date_format(self.d, "YEAR_MONTH_FORMAT"))
self.assertEqual(
"12/31/2009 8:50 p.m.", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
self.assertEqual("66666.666", localize(self.n))
self.assertEqual("99999.999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("Des. 31, 2009", localize(self.d))
self.assertEqual("Des. 31, 2009, 8:50 p.m.", localize(self.dt))
self.assertEqual("66666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("Des. 31, 2009", Template("{{ d }}").render(self.ctxt))
self.assertEqual(
"Des. 31, 2009, 8:50 p.m.", Template("{{ dt }}").render(self.ctxt)
)
self.assertEqual(
"66666.67", Template('{{ n|floatformat:"2u" }}').render(self.ctxt)
)
self.assertEqual(
"100000.0", Template('{{ f|floatformat:"u" }}').render(self.ctxt)
)
self.assertEqual(
"66666.67",
Template('{{ n|floatformat:"2gu" }}').render(self.ctxt),
)
self.assertEqual(
"100000.0",
Template('{{ f|floatformat:"ug" }}').render(self.ctxt),
)
self.assertEqual(
"10:15 a.m.", Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(
"12/31/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009 8:50 p.m.",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
form = I18nForm(
{
"decimal_field": "66666,666",
"float_field": "99999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1.234",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(["Introdu\xefu un n\xfamero."], form.errors["float_field"])
self.assertEqual(
["Introdu\xefu un n\xfamero."], form.errors["decimal_field"]
)
self.assertEqual(
["Introdu\xefu una data v\xe0lida."], form.errors["date_field"]
)
self.assertEqual(
["Introdu\xefu una data/hora v\xe0lides."],
form.errors["datetime_field"],
)
self.assertEqual(
["Introdu\xefu un n\xfamero enter."], form.errors["integer_field"]
)
form2 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form2.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form2.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
"</select>"
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when localization is disabled
# even if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified.
with self.settings(
USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR="!"
):
self.assertEqual(
"66666.67", Template('{{ n|floatformat:"2u" }}').render(self.ctxt)
)
self.assertEqual(
"100000.0", Template('{{ f|floatformat:"u" }}').render(self.ctxt)
)
def test_false_like_locale_formats(self):
"""
The active locale's formats take precedence over the default settings
even if they would be interpreted as False in a conditional test
(e.g. 0 or empty string) (#16938).
"""
with translation.override("fr"):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR="!"):
self.assertEqual("\xa0", get_format("THOUSAND_SEPARATOR"))
# Even a second time (after the format has been cached)...
self.assertEqual("\xa0", get_format("THOUSAND_SEPARATOR"))
with self.settings(FIRST_DAY_OF_WEEK=0):
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
# Even a second time (after the format has been cached)...
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override("ca", deactivate=True):
self.assertEqual(r"j E \d\e Y", get_format("DATE_FORMAT"))
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(",", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("10:15", time_format(self.t))
self.assertEqual("31 desembre de 2009", date_format(self.d))
self.assertEqual("1 abril de 2009", date_format(datetime.date(2009, 4, 1)))
self.assertEqual(
"desembre del 2009", date_format(self.d, "YEAR_MONTH_FORMAT")
)
self.assertEqual(
"31/12/2009 20:50", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66.666,666", localize(self.n))
self.assertEqual("99.999,999", localize(self.f))
self.assertEqual("10.000", localize(self.long))
self.assertEqual("True", localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666,666", localize(self.n))
self.assertEqual("99999,999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("31 desembre de 2009", localize(self.d))
self.assertEqual("31 desembre de 2009 a les 20:50", localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66.666,666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99.999,999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("10.000", Template("{{ l }}").render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm(
{
"decimal_field": "66.666,666",
"float_field": "99.999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1.234",
}
)
self.assertTrue(form3.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form3.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form3.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form3.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form3.cleaned_data["datetime_field"],
)
self.assertEqual(
datetime.time(20, 50), form3.cleaned_data["time_field"]
)
self.assertEqual(1234, form3.cleaned_data["integer_field"])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666,666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999,999", Template("{{ f }}").render(self.ctxt))
self.assertEqual(
"31 desembre de 2009", Template("{{ d }}").render(self.ctxt)
)
self.assertEqual(
"31 desembre de 2009 a les 20:50",
Template("{{ dt }}").render(self.ctxt),
)
self.assertEqual(
"66666,67", Template("{{ n|floatformat:2 }}").render(self.ctxt)
)
self.assertEqual(
"100000,0", Template("{{ f|floatformat }}").render(self.ctxt)
)
self.assertEqual(
"66.666,67",
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
"100.000,0",
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual(
"10:15", Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(
"31/12/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"31/12/2009 20:50",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt),
)
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm(
{
"decimal_field": "66666,666",
"float_field": "99999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1234",
}
)
self.assertTrue(form4.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form4.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form4.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form4.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form4.cleaned_data["datetime_field"],
)
self.assertEqual(
datetime.time(20, 50), form4.cleaned_data["time_field"]
)
self.assertEqual(1234, form4.cleaned_data["integer_field"])
form5 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form5.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form5.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# Russian locale (with E as month)
with translation.override("ru", deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>'
'<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>'
'<option value="3">\u041c\u0430\u0440\u0442</option>'
'<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>'
'<option value="5">\u041c\u0430\u0439</option>'
'<option value="6">\u0418\u044e\u043d\u044c</option>'
'<option value="7">\u0418\u044e\u043b\u044c</option>'
'<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>'
'<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c'
"</option>"
'<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>'
'<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c'
"</option>"
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# English locale
with translation.override("en", deactivate=True):
self.assertEqual("N j, Y", get_format("DATE_FORMAT"))
self.assertEqual(0, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(".", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("Dec. 31, 2009", date_format(self.d))
self.assertEqual("December 2009", date_format(self.d, "YEAR_MONTH_FORMAT"))
self.assertEqual(
"12/31/2009 8:50 p.m.", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66,666.666", localize(self.n))
self.assertEqual("99,999.999", localize(self.f))
self.assertEqual("10,000", localize(self.long))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666.666", localize(self.n))
self.assertEqual("99999.999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("Dec. 31, 2009", localize(self.d))
self.assertEqual("Dec. 31, 2009, 8:50 p.m.", localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66,666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99,999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("10,000", Template("{{ l }}").render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("Dec. 31, 2009", Template("{{ d }}").render(self.ctxt))
self.assertEqual(
"Dec. 31, 2009, 8:50 p.m.", Template("{{ dt }}").render(self.ctxt)
)
self.assertEqual(
"66666.67", Template("{{ n|floatformat:2 }}").render(self.ctxt)
)
self.assertEqual(
"100000.0", Template("{{ f|floatformat }}").render(self.ctxt)
)
self.assertEqual(
"66,666.67",
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
"100,000.0",
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009 8:50 p.m.",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
form5 = I18nForm(
{
"decimal_field": "66666.666",
"float_field": "99999.999",
"date_field": "12/31/2009",
"datetime_field": "12/31/2009 20:50",
"time_field": "20:50",
"integer_field": "1234",
}
)
self.assertTrue(form5.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form5.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form5.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form5.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form5.cleaned_data["datetime_field"],
)
self.assertEqual(datetime.time(20, 50), form5.cleaned_data["time_field"])
self.assertEqual(1234, form5.cleaned_data["integer_field"])
form6 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form6.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form6.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">January</option>'
'<option value="2">February</option>'
'<option value="3">March</option>'
'<option value="4">April</option>'
'<option value="5">May</option>'
'<option value="6">June</option>'
'<option value="7">July</option>'
'<option value="8">August</option>'
'<option value="9">September</option>'
'<option value="10">October</option>'
'<option value="11">November</option>'
'<option value="12" selected>December</option>'
"</select>"
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
def test_sub_locales(self):
"""
Check if sublocales fall back to the main locale
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override("de-at", deactivate=True):
self.assertEqual("66.666,666", Template("{{ n }}").render(self.ctxt))
with translation.override("es-us", deactivate=True):
self.assertEqual("31 de diciembre de 2009", date_format(self.d))
def test_localized_input(self):
"""
Tests if form input is correctly localized
"""
self.maxDiff = 1200
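        # Under de-at, dates render as DD.MM.YYYY HH:MM:SS and decimals use
        # "," as the separator, while cleaned_data holds the parsed Python
        # values.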
with translation.override("de-at", deactivate=True):
form6 = CompanyForm(
{
"name": "acme",
"date_added": datetime.datetime(2009, 12, 31, 6, 0, 0),
"cents_paid": decimal.Decimal("59.47"),
"products_delivered": 12000,
}
)
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input id="id_name" type="text" name="name" value="acme" '
' maxlength="50" required></li>'
'<li><label for="id_date_added">Date added:</label>'
'<input type="text" name="date_added" value="31.12.2009 06:00:00" '
' id="id_date_added" required></li>'
'<li><label for="id_cents_paid">Cents paid:</label>'
'<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" '
" required></li>"
'<li><label for="id_products_delivered">Products delivered:</label>'
'<input type="text" name="products_delivered" value="12000" '
' id="id_products_delivered" required>'
"</li>",
)
self.assertEqual(
localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)),
"31.12.2009 06:00:00",
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 6, 0, 0),
form6.cleaned_data["date_added"],
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML(
'<input type="text" name="products_delivered" '
'value="12.000" id="id_products_delivered" required>',
form6.as_ul(),
)
def test_localized_input_func(self):
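        # Even with USE_THOUSAND_SEPARATOR enabled, localize_input() must
        # stringify booleans as "True"/"False" (not format them as numbers)
        # and format dates/datetimes with the locale's input formats.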
tests = (
(True, "True"),
(datetime.date(1, 1, 1), "0001-01-01"),
(datetime.datetime(1, 1, 1), "0001-01-01 00:00:00"),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(localize_input(value), expected)
def test_sanitize_strftime_format(self):
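        # On some platforms (notably glibc), strftime() doesn't zero-pad
        # years below 1000, so sanitize_strftime_format() adds explicit
        # padding to the year-based directives %C, %F, %G, and %Y
        # (e.g. %Y -> %04Y).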
for year in (1, 99, 999, 1000):
dt = datetime.date(year, 1, 1)
for fmt, expected in [
("%C", "%02d" % (year // 100)),
("%F", "%04d-01-01" % year),
("%G", "%04d" % year),
("%Y", "%04d" % year),
]:
with self.subTest(year=year, fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
def test_sanitize_strftime_format_with_escaped_percent(self):
dt = datetime.date(1, 1, 1)
for fmt, expected in [
("%%C", "%C"),
("%%F", "%F"),
("%%G", "%G"),
("%%Y", "%Y"),
("%%%%C", "%%C"),
("%%%%F", "%%F"),
("%%%%G", "%%G"),
("%%%%Y", "%%Y"),
]:
with self.subTest(fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
for year in (1, 99, 999, 1000):
dt = datetime.date(year, 1, 1)
for fmt, expected in [
("%%%C", "%%%02d" % (year // 100)),
("%%%F", "%%%04d-01-01" % year),
("%%%G", "%%%04d" % year),
("%%%Y", "%%%04d" % year),
("%%%%%C", "%%%%%02d" % (year // 100)),
("%%%%%F", "%%%%%04d-01-01" % year),
("%%%%%G", "%%%%%04d" % year),
("%%%%%Y", "%%%%%04d" % year),
]:
with self.subTest(year=year, fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override("ru", deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators("1\xa0234\xa0567"), "1234567")
self.assertEqual(sanitize_separators("77\xa0777,777"), "77777.777")
self.assertEqual(sanitize_separators("12 345"), "12345")
self.assertEqual(sanitize_separators("77 777,777"), "77777.777")
with translation.override(None): # RemovedInDjango50Warning
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR="."):
self.assertEqual(sanitize_separators("12\xa0345"), "12\xa0345")
with self.settings(USE_THOUSAND_SEPARATOR=True):
with patch_formats(
get_language(), THOUSAND_SEPARATOR=".", DECIMAL_SEPARATOR=","
):
self.assertEqual(sanitize_separators("10.234"), "10234")
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators("10.10"), "10.10")
# RemovedInDjango50Warning: When the deprecation ends, remove
# @ignore_warnings and USE_L10N=False. The assertions should remain
# because format-related settings will take precedence over
# locale-dictated formats.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(USE_L10N=False):
with self.settings(DECIMAL_SEPARATOR=","):
self.assertEqual(sanitize_separators("1001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001.10"), "1001.10")
with self.settings(
DECIMAL_SEPARATOR=",",
THOUSAND_SEPARATOR=".",
USE_THOUSAND_SEPARATOR=True,
):
self.assertEqual(sanitize_separators("1.001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001.10"), "1001.10")
# Invalid output.
self.assertEqual(sanitize_separators("1,001.10"), "1.001.10")
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module("django.conf.locale.de.formats")
test_mod = import_module("i18n.other.locale.de.formats")
test_mod2 = import_module("i18n.other2.locale.de.formats")
with translation.override("de-at", deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules("de")), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules("de", "i18n.other.locale")),
[test_mod, default_mod],
)
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(
iter_format_modules(
"de", ["i18n.other.locale", "i18n.other2.locale"]
)
),
[test_mod, test_mod2, default_mod],
)
def test_iter_format_modules_stability(self):
"""
        Tests that the iter_format_modules function always yields format
        modules in a stable and correct order in the presence of both base
        ll and ll_CC formats.
"""
en_format_mod = import_module("django.conf.locale.en.formats")
en_gb_format_mod = import_module("django.conf.locale.en_GB.formats")
self.assertEqual(
list(iter_format_modules("en-gb")), [en_gb_format_mod, en_format_mod]
)
def test_get_format_modules_lang(self):
with translation.override("de", deactivate=True):
self.assertEqual(".", get_format("DECIMAL_SEPARATOR", lang="en"))
def test_get_format_lazy_format(self):
self.assertEqual(get_format(gettext_lazy("DATE_FORMAT")), "N j, Y")
def test_localize_templatetag_and_filter(self):
"""
Test the {% localize %} templatetag and the localize/unlocalize filters.
"""
context = Context(
{"int": 1455, "float": 3.14, "date": datetime.date(2016, 12, 31)}
)
template1 = Template(
"{% load l10n %}{% localize %}"
"{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; "
"{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}"
)
template2 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} "
"{{ int }}/{{ float }}/{{ date }}"
)
template3 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}"
)
template4 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{{ int|localize }}/{{ float|localize }}/{{ date|localize }}"
)
expected_localized = "1.455/3,14/31. Dezember 2016"
expected_unlocalized = "1455/3.14/Dez. 31, 2016"
output1 = "; ".join([expected_localized, expected_localized])
output2 = "; ".join(
[expected_localized, expected_unlocalized, expected_localized]
)
output3 = "; ".join([expected_localized, expected_unlocalized])
output4 = "; ".join([expected_unlocalized, expected_localized])
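        # With German active, localized output uses "." for thousands and ","
        # for decimals; unlocalized output uses plain str() for numbers and
        # the "N j, Y" date format set below.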
with translation.override("de", deactivate=True):
# RemovedInDjango50Warning: When the deprecation ends, remove
# @ignore_warnings and USE_L10N=False. The assertions should remain
# because format-related settings will take precedence over
# locale-dictated formats.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(
USE_L10N=False,
DATE_FORMAT="N j, Y",
DECIMAL_SEPARATOR=".",
NUMBER_GROUPING=0,
USE_THOUSAND_SEPARATOR=True,
):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_off_numbers(self):
"""A string representation is returned for unlocalized numbers."""
template = Template(
"{% load l10n %}{% localize off %}"
"{{ int }}/{{ float }}/{{ decimal }}{% endlocalize %}"
)
context = Context(
{"int": 1455, "float": 3.14, "decimal": decimal.Decimal("24.1567")}
)
with self.settings(
DECIMAL_SEPARATOR=",",
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR="°",
NUMBER_GROUPING=2,
):
self.assertEqual(template.render(context), "1455/3.14/24.1567")
# RemovedInDjango50Warning.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(
USE_L10N=False,
DECIMAL_SEPARATOR=",",
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR="°",
NUMBER_GROUPING=2,
):
self.assertEqual(template.render(context), "1455/3.14/24.1567")
def test_localized_as_text_as_hidden_input(self):
"""
Form input with 'as_hidden' or 'as_text' is correctly localized.
"""
self.maxDiff = 1200
with translation.override("de-at", deactivate=True):
template = Template(
"{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}"
)
template_as_text = Template(
"{% load l10n %}"
"{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}"
)
template_as_hidden = Template(
"{% load l10n %}"
"{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}"
)
form = CompanyForm(
{
"name": "acme",
"date_added": datetime.datetime(2009, 12, 31, 6, 0, 0),
"cents_paid": decimal.Decimal("59.47"),
"products_delivered": 12000,
}
)
context = Context({"form": form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" '
'value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" '
"required>",
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" '
'value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" '
"required>",
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" '
'value="31.12.2009 06:00:00">;'
'<input id="id_cents_paid" name="cents_paid" type="hidden" '
'value="59,47">',
)
def test_format_arbitrary_settings(self):
self.assertEqual(get_format("DEBUG"), "DEBUG")
def test_get_custom_format(self):
reset_format_cache()
with self.settings(FORMAT_MODULE_PATH="i18n.other.locale"):
with translation.override("fr", deactivate=True):
self.assertEqual("d/m/Y CUSTOM", get_format("CUSTOM_DAY_FORMAT"))
def test_admin_javascript_supported_input_formats(self):
"""
The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and
DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by
the admin's time picker widget.
"""
regex = re.compile("%([^BcdHImMpSwxXyY%])")
for language_code, language_name in settings.LANGUAGES:
for format_name in (
"DATE_INPUT_FORMATS",
"TIME_INPUT_FORMATS",
"DATETIME_INPUT_FORMATS",
):
with self.subTest(language=language_code, format=format_name):
formatter = get_format(format_name, lang=language_code)[0]
self.assertEqual(
regex.findall(formatter),
[],
"%s locale's %s uses an unsupported format code."
% (language_code, format_name),
)
class MiscTests(SimpleTestCase):
rf = RequestFactory()
@override_settings(LANGUAGE_CODE="de")
def test_english_fallback(self):
"""
        With a non-English LANGUAGE_CODE, if the active language is English
        or one of its variants, the untranslated string should be returned
        (instead of falling back to LANGUAGE_CODE) (see #24413).
"""
self.assertEqual(gettext("Image"), "Bild")
with translation.override("en"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-us"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-ca"):
self.assertEqual(gettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
tests = [
# Good headers
("de", [("de", 1.0)]),
("en-AU", [("en-au", 1.0)]),
("es-419", [("es-419", 1.0)]),
("*;q=1.00", [("*", 1.0)]),
("en-AU;q=0.123", [("en-au", 0.123)]),
("en-au;q=0.5", [("en-au", 0.5)]),
("en-au;q=1.0", [("en-au", 1.0)]),
("da, en-gb;q=0.25, en;q=0.5", [("da", 1.0), ("en", 0.5), ("en-gb", 0.25)]),
("en-au-xx", [("en-au-xx", 1.0)]),
(
"de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125",
[
("de", 1.0),
("en-au", 0.75),
("en-us", 0.5),
("en", 0.25),
("es", 0.125),
("fa", 0.125),
],
),
("*", [("*", 1.0)]),
("de;q=0.", [("de", 0.0)]),
("en; q=1,", [("en", 1.0)]),
("en; q=1.0, * ; q=0.5", [("en", 1.0), ("*", 0.5)]),
# Bad headers
("en-gb;q=1.0000", []),
("en;q=0.1234", []),
("en;q=.2", []),
("abcdefghi-au", []),
("**", []),
("en,,gb", []),
("en-au;q=0.1.0", []),
(("X" * 97) + "Z,en", []),
("da, en-gb;q=0.8, en;q=0.7,#", []),
("de;q=2.0", []),
("de;q=0.a", []),
("12-345", []),
("", []),
("en;q=1e0", []),
("en-au;q=1.0", []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(
trans_real.parse_accept_lang_header(value), tuple(expected)
)
def test_parse_literal_http_header(self):
"""
Now test that we parse a literal HTTP header correctly.
"""
g = get_language_from_request
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "pt-br"}
self.assertEqual("pt-br", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "pt"}
self.assertEqual("pt", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "es,de"}
self.assertEqual("es", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "es-ar,de"}
self.assertEqual("es-ar", g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.META = {"HTTP_ACCEPT_LANGUAGE": "es-us"}
self.assertEqual(g(r), "es")
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django, but there is a translation for the variant
        # (zh-hans). When the user sets zh-hans as the preferred language, it
        # should be selected by Django without being ignored or falling back.
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-hans,de"}
self.assertEqual(g(r), "zh-hans")
r.META = {"HTTP_ACCEPT_LANGUAGE": "NL"}
self.assertEqual("nl", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "fy"}
self.assertEqual("fy", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "ia"}
self.assertEqual("ia", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "sr-latn"}
self.assertEqual("sr-latn", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-hans"}
self.assertEqual("zh-hans", g(r))
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-hant"}
self.assertEqual("zh-hant", g(r))
@override_settings(
LANGUAGES=[
("en", "English"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
        Some browsers (Firefox, IE, etc.) use deprecated language codes. Since
        these language codes were removed in Django 1.9, they would otherwise
        be matched incorrectly; for example, zh-tw (traditional) would be
        interpreted as zh-hans (simplified), which is wrong. So these
        deprecated language codes should still be accepted.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-cn,en"}
self.assertEqual(g(r), "zh-hans")
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-tw,en"}
self.assertEqual(g(r), "zh-hant")
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "zh-my,en"}
self.assertEqual(get_language_from_request(r), "zh-hans")
def test_subsequent_code_fallback_language(self):
"""
Subsequent language codes should be used when the language code is not
supported.
"""
tests = [
("zh-Hans-CN", "zh-hans"),
("zh-hans-mo", "zh-hans"),
("zh-hans-HK", "zh-hans"),
("zh-Hant-HK", "zh-hant"),
("zh-hant-tw", "zh-hant"),
("zh-hant-SG", "zh-hant"),
]
r = self.rf.get("/")
r.COOKIES = {}
for value, expected in tests:
with self.subTest(value=value):
r.META = {"HTTP_ACCEPT_LANGUAGE": f"{value},en"}
self.assertEqual(get_language_from_request(r), expected)
def test_parse_language_cookie(self):
"""
Now test that we parse language preferences stored in a cookie correctly.
"""
g = get_language_from_request
r = self.rf.get("/")
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: "pt-br"}
r.META = {}
self.assertEqual("pt-br", g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: "pt"}
r.META = {}
self.assertEqual("pt", g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: "es"}
r.META = {"HTTP_ACCEPT_LANGUAGE": "de"}
self.assertEqual("es", g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: "es-us"}
r.META = {}
self.assertEqual(g(r), "es")
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django, but there is a translation for the variant
        # (zh-hans). When the user sets zh-hans as the preferred language, it
        # should be selected by Django without being ignored or falling back.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: "zh-hans"}
r.META = {"HTTP_ACCEPT_LANGUAGE": "de"}
self.assertEqual(g(r), "zh-hans")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("ar-dz", "Algerian Arabic"),
("de", "German"),
("de-at", "Austrian German"),
("pt-BR", "Portuguese (Brazil)"),
],
)
def test_get_supported_language_variant_real(self):
g = trans_real.get_supported_language_variant
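        # A country-specific code falls back to its generic language, and a
        # generic code may fall back to a country-specific variant
        # (e.g. pt -> pt-br) unless strict=True.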
self.assertEqual(g("en"), "en")
self.assertEqual(g("en-gb"), "en")
self.assertEqual(g("de"), "de")
self.assertEqual(g("de-at"), "de-at")
self.assertEqual(g("de-ch"), "de")
self.assertEqual(g("pt-br"), "pt-br")
self.assertEqual(g("pt-BR"), "pt-BR")
self.assertEqual(g("pt"), "pt-br")
self.assertEqual(g("pt-pt"), "pt-br")
self.assertEqual(g("ar-dz"), "ar-dz")
self.assertEqual(g("ar-DZ"), "ar-DZ")
with self.assertRaises(LookupError):
g("pt", strict=True)
with self.assertRaises(LookupError):
g("pt-pt", strict=True)
with self.assertRaises(LookupError):
g("xyz")
with self.assertRaises(LookupError):
g("xy-zz")
def test_get_supported_language_variant_null(self):
g = trans_null.get_supported_language_variant
self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE)
with self.assertRaises(LookupError):
g("pt")
with self.assertRaises(LookupError):
g("de")
with self.assertRaises(LookupError):
g("de-at")
with self.assertRaises(LookupError):
g("de", strict=True)
with self.assertRaises(LookupError):
g("de-at", strict=True)
with self.assertRaises(LookupError):
g("xyz")
@override_settings(
LANGUAGES=[
("en", "English"),
("en-latn-us", "Latin English"),
("de", "German"),
("de-1996", "German, orthography of 1996"),
("de-at", "Austrian German"),
("de-ch-1901", "German, Swiss variant, traditional orthography"),
("i-mingo", "Mingo"),
("kl-tunumiit", "Tunumiisiut"),
("nan-hani-tw", "Hanji"),
("pl", "Polish"),
],
)
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
tests = [
("/pl/", "pl"),
("/pl", "pl"),
("/xyz/", None),
("/en/", "en"),
("/en-gb/", "en"),
("/en-latn-us/", "en-latn-us"),
("/en-Latn-US/", "en-Latn-US"),
("/de/", "de"),
("/de-1996/", "de-1996"),
("/de-at/", "de-at"),
("/de-AT/", "de-AT"),
("/de-ch/", "de"),
("/de-ch-1901/", "de-ch-1901"),
("/de-simple-page-test/", None),
("/i-mingo/", "i-mingo"),
("/kl-tunumiit/", "kl-tunumiit"),
("/nan-hani-tw/", "nan-hani-tw"),
]
for path, language in tests:
with self.subTest(path=path):
self.assertEqual(g(path), language)
def test_get_language_from_path_null(self):
g = trans_null.get_language_from_path
self.assertIsNone(g("/pl/"))
self.assertIsNone(g("/pl"))
self.assertIsNone(g("/xyz/"))
def test_cache_resetting(self):
"""
        After setting LANGUAGES, the cache should be cleared and languages
        previously valid should not be used (#14170).
"""
g = get_language_from_request
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "pt-br"}
self.assertEqual("pt-br", g(r))
with self.settings(LANGUAGES=[("en", "English")]):
self.assertNotEqual("pt-br", g(r))
def test_i18n_patterns_returns_list(self):
with override_settings(USE_I18N=False):
self.assertIsInstance(i18n_patterns([]), list)
with override_settings(USE_I18N=True):
self.assertIsInstance(i18n_patterns([]), list)
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super().setUp()
activate("de")
def tearDown(self):
deactivate()
super().tearDown()
def assertGettext(self, msgid, msgstr):
result = gettext(msgid)
self.assertIn(
msgstr,
result,
"The string '%s' isn't in the translation of '%s'; the actual result is "
"'%s'." % (msgstr, msgid, result),
)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE="de")
def test_app_translation(self):
# Original translation.
self.assertGettext("Date/time", "Datum/Zeit")
# Different translation.
with self.modify_settings(INSTALLED_APPS={"append": "i18n.resolution"}):
# Force refreshing translations.
activate("de")
# Doesn't work because it's added later in the list.
self.assertGettext("Date/time", "Datum/Zeit")
with self.modify_settings(
INSTALLED_APPS={"remove": "django.contrib.admin.apps.SimpleAdminConfig"}
):
# Force refreshing translations.
activate("de")
# Unless the original is removed from the list.
self.assertGettext("Date/time", "Datum/Zeit (APP)")
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertGettext("Time", "LOCALE_PATHS")
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=["i18n.resolution"]):
self.assertGettext("Time", "LOCALE_PATHS")
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(gettext("Date/time"), "Datum/Zeit")
@override_settings(INSTALLED_APPS=["i18n.territorial_fallback"])
class TranslationFallbackI18NTests(ResolutionOrderI18NTests):
def test_sparse_territory_catalog(self):
"""
Untranslated strings for territorial language variants use the
translations of the generic language. In this case, the de-de
translation falls back to de.
"""
with translation.override("de-de"):
self.assertGettext("Test 1 (en)", "(de-de)")
self.assertGettext("Test 2 (en)", "(de)")
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeString("Iñtërnâtiônàlizætiøn1")
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info("de")
self.assertEqual(li["code"], "de")
self.assertEqual(li["name_local"], "Deutsch")
self.assertEqual(li["name"], "German")
self.assertIs(li["bidi"], False)
def test_unknown_language_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx"):
get_language_info("xx")
with translation.override("xx"):
# A language with no translation catalogs should fallback to the
# untranslated string.
self.assertEqual(gettext("Title"), "Title")
def test_unknown_only_country_code(self):
li = get_language_info("de-xx")
self.assertEqual(li["code"], "de")
self.assertEqual(li["name_local"], "Deutsch")
self.assertEqual(li["name"], "German")
self.assertIs(li["bidi"], False)
def test_unknown_language_code_and_country_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"):
get_language_info("xx-xx")
def test_fallback_language_code(self):
"""
        get_language_info() returns the first fallback language's info if the
        lang_info struct does not contain the 'name' key.
"""
li = get_language_info("zh-my")
self.assertEqual(li["code"], "zh-hans")
li = get_language_info("zh-hans")
self.assertEqual(li["code"], "zh-hans")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("fr", "French"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls",
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get("/fr/streaming/")
self.assertContains(response, "Oui/Non")
response = self.client.get("/en/streaming/")
self.assertContains(response, "Yes/No")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("de", "German"),
("fr", "French"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls_default_unprefixed",
LANGUAGE_CODE="en",
)
class UnprefixedDefaultLanguageTests(SimpleTestCase):
def test_default_lang_without_prefix(self):
"""
With i18n_patterns(..., prefix_default_language=False), the default
language (settings.LANGUAGE_CODE) should be accessible without a prefix.
"""
response = self.client.get("/simple/")
self.assertEqual(response.content, b"Yes")
def test_other_lang_with_prefix(self):
response = self.client.get("/fr/simple/")
self.assertEqual(response.content, b"Oui")
def test_unprefixed_language_other_than_accept_language(self):
response = self.client.get("/simple/", HTTP_ACCEPT_LANGUAGE="fr")
self.assertEqual(response.content, b"Yes")
def test_page_with_dash(self):
# A page starting with /de* shouldn't match the 'de' language code.
response = self.client.get("/de-simple-page-test/")
self.assertEqual(response.content, b"Yes")
def test_no_redirect_on_404(self):
"""
A request for a nonexistent URL shouldn't cause a redirect to
/<default_language>/<request_url> when prefix_default_language=False and
/<default_language>/<request_url> has a URL match (#27402).
"""
# A match for /group1/group2/ must exist for this to act as a
# regression test.
response = self.client.get("/group1/group2/")
self.assertEqual(response.status_code, 200)
response = self.client.get("/nonexistent/")
self.assertEqual(response.status_code, 404)
@override_settings(
USE_I18N=True,
LANGUAGES=[
("bg", "Bulgarian"),
("en-us", "English"),
("pt-br", "Portuguese (Brazil)"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls",
)
class CountrySpecificLanguageTests(SimpleTestCase):
rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language("en"))
self.assertTrue(check_for_language("en-us"))
self.assertTrue(check_for_language("en-US"))
self.assertFalse(check_for_language("en_US"))
self.assertTrue(check_for_language("be"))
self.assertTrue(check_for_language("be@latin"))
self.assertTrue(check_for_language("sr-RS@latin"))
self.assertTrue(check_for_language("sr-RS@12345"))
self.assertFalse(check_for_language("en-ü"))
self.assertFalse(check_for_language("en\x00"))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language("be@ "))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language("tr-TR.UTF-8"))
self.assertFalse(check_for_language("tr-TR.UTF8"))
self.assertFalse(check_for_language("de-DE.utf-8"))
def test_check_for_language_null(self):
self.assertIs(trans_null.check_for_language("en"), True)
def test_get_language_from_request(self):
# issue 19919
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "en-US,en;q=0.8,bg;q=0.6,ru;q=0.4"}
lang = get_language_from_request(r)
self.assertEqual("en-us", lang)
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4"}
lang = get_language_from_request(r)
self.assertEqual("bg", lang)
def test_get_language_from_request_null(self):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, "en")
with override_settings(LANGUAGE_CODE="de"):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, "de")
def test_specific_language_codes(self):
# issue 11915
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "pt,en-US;q=0.8,en;q=0.6,ru;q=0.4"}
lang = get_language_from_request(r)
self.assertEqual("pt-br", lang)
r = self.rf.get("/")
r.COOKIES = {}
r.META = {"HTTP_ACCEPT_LANGUAGE": "pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4"}
lang = get_language_from_request(r)
self.assertEqual("pt-br", lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super().setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super().tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
"""OSError is raised if the default language is unparseable."""
self.patchGettextFind()
trans_real._translations = {}
with self.assertRaises(OSError):
activate("en")
class NonDjangoLanguageTests(SimpleTestCase):
"""
    A language not present in Django's default languages can still be
    installed and used by a Django project.
"""
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
("xxx", "Somelanguage"),
],
LANGUAGE_CODE="xxx",
LOCALE_PATHS=[os.path.join(here, "commands", "locale")],
)
def test_non_django_language(self):
self.assertEqual(get_language(), "xxx")
self.assertEqual(gettext("year"), "reay")
@override_settings(USE_I18N=True)
def test_check_for_language(self):
with tempfile.TemporaryDirectory() as app_dir:
os.makedirs(os.path.join(app_dir, "locale", "dummy_Lang", "LC_MESSAGES"))
open(
os.path.join(
app_dir, "locale", "dummy_Lang", "LC_MESSAGES", "django.mo"
),
"w",
).close()
app_config = AppConfig("dummy_app", AppModuleStub(__path__=[app_dir]))
with mock.patch(
"django.apps.apps.get_app_configs", return_value=[app_config]
):
self.assertIs(check_for_language("dummy-lang"), True)
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
# xyz language has no locale files
("xyz", "XYZ"),
],
)
@translation.override("xyz")
def test_plural_non_django_language(self):
self.assertEqual(get_language(), "xyz")
self.assertEqual(ngettext("year", "years", 2), "years")
@override_settings(USE_I18N=True)
class WatchForTranslationChangesTests(SimpleTestCase):
@override_settings(USE_I18N=False)
def test_i18n_disabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_not_called()
def test_i18n_enabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
self.assertGreater(mocked_sender.watch_dir.call_count, 1)
def test_i18n_locale_paths(self):
mocked_sender = mock.MagicMock()
with tempfile.TemporaryDirectory() as app_dir:
with self.settings(LOCALE_PATHS=[app_dir]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_any_call(Path(app_dir), "**/*.mo")
def test_i18n_app_dirs(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=["i18n.sampleproject"]):
watch_for_translation_changes(mocked_sender)
project_dir = Path(__file__).parent / "sampleproject" / "locale"
mocked_sender.watch_dir.assert_any_call(project_dir, "**/*.mo")
def test_i18n_app_dirs_ignore_django_apps(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=["django.contrib.admin"]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_called_once_with(Path("locale"), "**/*.mo")
def test_i18n_local_locale(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
locale_dir = Path(__file__).parent / "locale"
mocked_sender.watch_dir.assert_any_call(locale_dir, "**/*.mo")
class TranslationFileChangedTests(SimpleTestCase):
def setUp(self):
self.gettext_translations = gettext_module._translations.copy()
self.trans_real_translations = trans_real._translations.copy()
def tearDown(self):
        gettext_module._translations = self.gettext_translations
trans_real._translations = self.trans_real_translations
def test_ignores_non_mo_files(self):
gettext_module._translations = {"foo": "bar"}
path = Path("test.py")
self.assertIsNone(translation_file_changed(None, path))
self.assertEqual(gettext_module._translations, {"foo": "bar"})
def test_resets_cache_with_mo_files(self):
gettext_module._translations = {"foo": "bar"}
trans_real._translations = {"foo": "bar"}
trans_real._default = 1
trans_real._active = False
path = Path("test.mo")
self.assertIs(translation_file_changed(None, path), True)
self.assertEqual(gettext_module._translations, {})
self.assertEqual(trans_real._translations, {})
self.assertIsNone(trans_real._default)
self.assertIsInstance(trans_real._active, Local)
class UtilsTests(SimpleTestCase):
def test_round_away_from_one(self):
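        # round_away_from_one() rounds away from 1: values below 1 are
        # floored and values above 1 are ceiled, as the (value, expected)
        # pairs below show.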
tests = [
(0, 0),
(0.0, 0),
(0.25, 0),
(0.5, 0),
(0.75, 0),
(1, 1),
(1.0, 1),
(1.25, 2),
(1.5, 2),
(1.75, 2),
(-0.0, 0),
(-0.25, -1),
(-0.5, -1),
(-0.75, -1),
(-1, -1),
(-1.0, -1),
(-1.25, -2),
(-1.5, -2),
(-1.75, -2),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(round_away_from_one(value), expected)
import datetime
from collections import Counter
from unittest import mock
from django.core.exceptions import ValidationError
from django.forms import (
BaseForm,
CharField,
DateField,
FileField,
Form,
IntegerField,
SplitDateTimeField,
formsets,
)
from django.forms.formsets import (
INITIAL_FORM_COUNT,
MAX_NUM_FORM_COUNT,
MIN_NUM_FORM_COUNT,
TOTAL_FORM_COUNT,
BaseFormSet,
ManagementForm,
all_valid,
formset_factory,
)
from django.forms.renderers import TemplatesSetting, get_default_renderer
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.test import SimpleTestCase
from django.test.utils import isolate_lru_cache
from django.utils.deprecation import RemovedInDjango50Warning
from . import jinja2_tests
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class ChoiceFormsetWithNonFormError(ChoiceFormSet):
def clean(self):
super().clean()
raise ValidationError("non-form error")
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink["name"] in seen_drinks:
raise ValidationError("You may only specify a drink once.")
seen_drinks.append(drink["name"])
# A FormSet that takes a list of favorite drinks and raises an error if
# there are any duplicates.
FavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3
)
class CustomKwargForm(Form):
def __init__(self, *args, custom_kwarg, **kwargs):
self.custom_kwarg = custom_kwarg
super().__init__(*args, **kwargs)
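# CustomKwargForm lets the tests below verify that form_kwargs and
# get_form_kwargs() are forwarded to each form's constructor.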
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(
self,
formset_data=None,
formset_class=ChoiceFormSet,
total_forms=None,
initial_forms=0,
max_num_forms=0,
min_num_forms=0,
**kwargs,
):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
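        For example, [("Calexico", "100")] produces bound data with the keys
        choices-0-choice and choices-0-votes plus the four management-form
        fields.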
"""
kwargs.setdefault("prefix", "choices")
kwargs.setdefault("auto_id", False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs["prefix"],) + args
return "-".join(args)
data = {
prefixed("TOTAL_FORMS"): str(total_forms),
prefixed("INITIAL_FORMS"): str(initial_forms),
prefixed("MAX_NUM_FORMS"): str(max_num_forms),
prefixed("MIN_NUM_FORMS"): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), "choice")] = choice
data[prefixed(str(i), "votes")] = votes
return formset_class(data, **kwargs)
def test_basic_formset(self):
"""
A FormSet constructor takes the same arguments as Form. Create a
FormSet for adding data. By default, it displays 1 blank form.
"""
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1">
<input type="hidden" name="choices-INITIAL_FORMS" value="0">
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000">
<div>Choice:<input type="text" name="choices-0-choice"></div>
<div>Votes:<input type="number" name="choices-0-votes"></div>""",
)
        # FormSets are treated similarly to Forms. A FormSet has an is_valid()
# method, and a cleaned_data or errors attribute depending on whether
# all the forms passed validation. However, unlike a Form, cleaned_data
# and errors will be a list of dicts rather than a single dict.
formset = self.make_choiceformset([("Calexico", "100")])
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[{"votes": 100, "choice": "Calexico"}],
)
# If a FormSet wasn't passed any data, is_valid() and has_changed()
# return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
"""
Custom kwargs set on the formset instance are passed to the
underlying forms.
"""
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={"custom_kwarg": 1})
for form in formset:
self.assertTrue(hasattr(form, "custom_kwarg"))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
"""Form kwargs can be passed dynamically in a formset."""
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {"custom_kwarg": index}
DynamicFormSet = formset_factory(
CustomKwargForm, formset=DynamicBaseFormSet, extra=2
)
formset = DynamicFormSet(form_kwargs={"custom_kwarg": "ignored"})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, "custom_kwarg"))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={"custom_kwarg": 1})
self.assertTrue(hasattr(formset.empty_form, "custom_kwarg"))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([("Calexico", "")])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{"votes": ["This field is required."]}])
def test_formset_validation_count(self):
"""
A formset's ManagementForm is validated once per FormSet.is_valid()
call and each form of the formset is cleaned once.
"""
def make_method_counter(func):
"""Add a counter to func for the number of times it's called."""
counter = Counter()
counter.call_count = 0
def mocked_func(*args, **kwargs):
counter.call_count += 1
return func(*args, **kwargs)
return mocked_func, counter
mocked_is_valid, is_valid_counter = make_method_counter(
formsets.ManagementForm.is_valid
)
mocked_full_clean, full_clean_counter = make_method_counter(BaseForm.full_clean)
formset = self.make_choiceformset(
[("Calexico", "100"), ("Any1", "42"), ("Any2", "101")]
)
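        # Three data forms plus the ManagementForm: full_clean() should run
        # four times, while ManagementForm.is_valid() runs only once.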
with mock.patch(
"django.forms.formsets.ManagementForm.is_valid", mocked_is_valid
), mock.patch("django.forms.forms.BaseForm.full_clean", mocked_full_clean):
self.assertTrue(formset.is_valid())
self.assertEqual(is_valid_counter.call_count, 1)
self.assertEqual(full_clean_counter.call_count, 4)
def test_formset_has_changed(self):
"""
FormSet.has_changed() is True if any data is passed to its forms, even
if the formset didn't validate.
"""
blank_formset = self.make_choiceformset([("", "")])
self.assertFalse(blank_formset.has_changed())
# invalid formset
invalid_formset = self.make_choiceformset([("Calexico", "")])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset
valid_formset = self.make_choiceformset([("Calexico", "100")])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
"""
A FormSet can be prefilled with existing data by providing a list of
dicts to the `initial` argument. By default, an extra blank form is
included.
"""
formset = self.make_choiceformset(
initial=[{"choice": "Calexico", "votes": 100}]
)
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico">'
"</li>"
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
'<li>Choice: <input type="text" name="choices-1-choice"></li>'
'<li>Votes: <input type="number" name="choices-1-votes"></li>',
)
def test_blank_form_unfilled(self):
"""A form that's displayed as blank may be submitted as blank."""
formset = self.make_choiceformset(
[("Calexico", "100"), ("", "")], initial_forms=1
)
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[{"votes": 100, "choice": "Calexico"}, {}],
)
def test_second_form_partially_filled(self):
"""
If at least one field is filled out on a blank form, it will be
validated.
"""
formset = self.make_choiceformset(
[("Calexico", "100"), ("The Decemberists", "")], initial_forms=1
)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {"votes": ["This field is required."]}])
def test_delete_prefilled_data(self):
"""
Deleting prefilled data is an error. Removing data from form fields
isn't the proper way to delete it.
"""
formset = self.make_choiceformset([("", ""), ("", "")], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[
{
"votes": ["This field is required."],
"choice": ["This field is required."],
},
{},
],
)
def test_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can be displayed using formset_factory's
`extra` argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>""",
)
# Since every form was displayed as blank, they are also accepted as
# blank. This may seem a little strange, but min_num is used to require
# a minimum number of forms to be completed.
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "",
"choices-0-votes": "",
"choices-1-choice": "",
"choices-1-votes": "",
"choices-2-choice": "",
"choices-2-votes": "",
}
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can also be displayed using formset_factory's
min_num argument. It will (essentially) increment the extra argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix="choices")
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>""",
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
"""More than 1 empty form can be displayed using min_num."""
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>""",
)
def test_single_form_completed(self):
"""Just one form may be completed."""
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-1-choice": "",
"choices-1-votes": "",
"choices-2-choice": "",
"choices-2-votes": "",
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[{"votes": 100, "choice": "Calexico"}, {}, {}],
)
def test_formset_validate_max_flag(self):
"""
If validate_max is set and max_num is less than TOTAL_FORMS in the
data, a ValidationError is raised. MAX_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
"choices-TOTAL_FORMS": "2", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "2", # max number of forms - should be ignored
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at most 1 form."])
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>Please submit at most 1 form.</li></ul>',
)
def test_formset_validate_max_flag_custom_error(self):
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-MAX_NUM_FORMS": "2",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(
data,
auto_id=False,
prefix="choices",
error_messages={
"too_many_forms": "Number of submitted forms should be at most %(num)d."
},
)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(),
["Number of submitted forms should be at most 1."],
)
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform">'
"<li>Number of submitted forms should be at most 1.</li></ul>",
)
def test_formset_validate_min_flag(self):
"""
If validate_min is set and min_num is more than TOTAL_FORMS in the
data, a ValidationError is raised. MIN_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
"choices-TOTAL_FORMS": "2", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms - should be ignored
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at least 3 forms."])
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>'
"Please submit at least 3 forms.</li></ul>",
)
def test_formset_validate_min_flag_custom_formatted_error(self):
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-MAX_NUM_FORMS": "0",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(
data,
auto_id=False,
prefix="choices",
error_messages={
"too_few_forms": "Number of submitted forms should be at least %(num)d."
},
)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(),
["Number of submitted forms should be at least 3."],
)
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform">'
"<li>Number of submitted forms should be at least 3.</li></ul>",
)
def test_formset_validate_min_unchanged_forms(self):
"""
min_num validation doesn't consider unchanged forms with initial data
as "empty".
"""
initial = [
{"choice": "Zero", "votes": 0},
{"choice": "One", "votes": 0},
]
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "2",
"choices-MIN_NUM_FORMS": "0",
"choices-MAX_NUM_FORMS": "2",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1", # changed from initial
}
ChoiceFormSet = formset_factory(Choice, min_num=2, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices", initial=initial)
self.assertFalse(formset.forms[0].has_changed())
self.assertTrue(formset.forms[1].has_changed())
self.assertTrue(formset.is_valid())
def test_formset_validate_min_excludes_empty_forms(self):
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
}
ChoiceFormSet = formset_factory(
Choice, extra=2, min_num=1, validate_min=True, can_delete=True
)
formset = ChoiceFormSet(data, prefix="choices")
self.assertFalse(formset.has_changed())
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at least 1 form."])
def test_second_form_partially_filled_2(self):
"""A partially completed form is invalid."""
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-1-choice": "The Decemberists",
"choices-1-votes": "", # missing value
"choices-2-choice": "",
"choices-2-votes": "",
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors, [{}, {"votes": ["This field is required."]}, {}]
)
def test_more_initial_data(self):
"""
The extra argument works when the formset is pre-filled with initial
data.
"""
initial = [{"choice": "Calexico", "votes": 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico">'
"</li>"
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
'<li>Choice: <input type="text" name="choices-1-choice"></li>'
'<li>Votes: <input type="number" name="choices-1-votes"></li>'
'<li>Choice: <input type="text" name="choices-2-choice"></li>'
'<li>Votes: <input type="number" name="choices-2-votes"></li>'
'<li>Choice: <input type="text" name="choices-3-choice"></li>'
'<li>Votes: <input type="number" name="choices-3-votes"></li>',
)
        # Retrieving an empty form works. It isn't included in formset.forms;
        # it renders with the __prefix__ placeholder instead of a form index.
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice"></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes"></li>""",
)
def test_formset_with_deletion(self):
"""
formset_factory's can_delete argument adds a boolean "delete" field to
each form. When that boolean field is True, the form will be in
formset.deleted_forms.
"""
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [
{"choice": "Calexico", "votes": 100},
{"choice": "Fergie", "votes": 900},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico">'
"</li>"
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
'<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>'
'<li>Choice: <input type="text" name="choices-1-choice" value="Fergie">'
"</li>"
'<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>'
'<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>'
'<li>Choice: <input type="text" name="choices-2-choice"></li>'
'<li>Votes: <input type="number" name="choices-2-votes"></li>'
'<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>',
)
# To delete something, set that form's special delete field to 'on'.
# Let's go ahead and delete Fergie.
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "2", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-0-DELETE": "",
"choices-1-choice": "Fergie",
"choices-1-votes": "900",
"choices-1-DELETE": "on",
"choices-2-choice": "",
"choices-2-votes": "",
"choices-2-DELETE": "",
}
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{"votes": 100, "DELETE": False, "choice": "Calexico"},
{"votes": 900, "DELETE": True, "choice": "Fergie"},
{},
],
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{"votes": 900, "DELETE": True, "choice": "Fergie"}],
)
def test_formset_with_deletion_remove_deletion_flag(self):
"""
If a form is filled with something and can_delete is also checked, that
form's errors shouldn't make the entire formset invalid since it's
going to be deleted.
"""
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
"check-TOTAL_FORMS": "3", # the number of forms rendered
"check-INITIAL_FORMS": "2", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"check-MAX_NUM_FORMS": "0", # max number of forms
"check-0-field": "200",
"check-0-DELETE": "",
"check-1-field": "50",
"check-1-DELETE": "on",
"check-2-field": "",
"check-2-DELETE": "",
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix="check")
self.assertTrue(formset.is_valid())
# If the deletion flag is removed, validation is enabled.
data["check-1-DELETE"] = ""
formset = CheckFormSet(data, prefix="check")
self.assertFalse(formset.is_valid())
def test_formset_with_deletion_invalid_deleted_form(self):
"""
deleted_forms works on a valid formset even if a deleted form would
have been invalid.
"""
FavoriteDrinkFormset = formset_factory(form=FavoriteDrinkForm, can_delete=True)
formset = FavoriteDrinkFormset(
{
"form-0-name": "",
"form-0-DELETE": "on", # no name!
"form-TOTAL_FORMS": 1,
"form-INITIAL_FORMS": 1,
"form-MIN_NUM_FORMS": 0,
"form-MAX_NUM_FORMS": 1,
}
)
self.assertTrue(formset.is_valid())
self.assertEqual(formset._errors, [])
self.assertEqual(len(formset.deleted_forms), 1)
def test_formset_with_deletion_custom_widget(self):
class DeletionAttributeFormSet(BaseFormSet):
deletion_widget = HiddenInput
class DeletionMethodFormSet(BaseFormSet):
def get_deletion_widget(self):
return HiddenInput(attrs={"class": "deletion"})
tests = [
(DeletionAttributeFormSet, '<input type="hidden" name="form-0-DELETE">'),
(
DeletionMethodFormSet,
'<input class="deletion" type="hidden" name="form-0-DELETE">',
),
]
for formset_class, delete_html in tests:
with self.subTest(formset_class=formset_class.__name__):
ArticleFormSet = formset_factory(
ArticleForm,
formset=formset_class,
can_delete=True,
)
formset = ArticleFormSet(auto_id=False)
self.assertHTMLEqual(
"\n".join([form.as_ul() for form in formset.forms]),
(
f'<li>Title: <input type="text" name="form-0-title"></li>'
f'<li>Pub date: <input type="text" name="form-0-pub_date">'
f"{delete_html}</li>"
),
)
def test_formsets_with_ordering(self):
"""
formset_factory's can_order argument adds an integer field to each
form. When form validation succeeds,
[form.cleaned_data for form in formset.forms]
will have the data in the correct order specified by the ordering
fields. If a number is duplicated in the set of ordering fields, for
instance form 0 and form 3 are both marked as 1, then the form index is
used as a secondary ordering criterion. In order to put something at the
front of the list, you'd need to set its order to 0.
"""
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [
{"choice": "Calexico", "votes": 100},
{"choice": "Fergie", "votes": 900},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico">'
"</li>"
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
'<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>'
'<li>Choice: <input type="text" name="choices-1-choice" value="Fergie">'
"</li>"
'<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>'
'<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>'
'<li>Choice: <input type="text" name="choices-2-choice"></li>'
'<li>Votes: <input type="number" name="choices-2-votes"></li>'
'<li>Order: <input type="number" name="choices-2-ORDER"></li>',
)
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "2", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-0-ORDER": "1",
"choices-1-choice": "Fergie",
"choices-1-votes": "900",
"choices-1-ORDER": "2",
"choices-2-choice": "The Decemberists",
"choices-2-votes": "500",
"choices-2-ORDER": "0",
}
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{"votes": 500, "ORDER": 0, "choice": "The Decemberists"},
{"votes": 100, "ORDER": 1, "choice": "Calexico"},
{"votes": 900, "ORDER": 2, "choice": "Fergie"},
],
)
def test_formsets_with_ordering_custom_widget(self):
class OrderingAttributeFormSet(BaseFormSet):
ordering_widget = HiddenInput
class OrderingMethodFormSet(BaseFormSet):
def get_ordering_widget(self):
return HiddenInput(attrs={"class": "ordering"})
tests = (
(OrderingAttributeFormSet, '<input type="hidden" name="form-0-ORDER">'),
(
OrderingMethodFormSet,
'<input class="ordering" type="hidden" name="form-0-ORDER">',
),
)
for formset_class, order_html in tests:
with self.subTest(formset_class=formset_class.__name__):
ArticleFormSet = formset_factory(
ArticleForm, formset=formset_class, can_order=True
)
formset = ArticleFormSet(auto_id=False)
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
(
'<li>Title: <input type="text" name="form-0-title"></li>'
'<li>Pub date: <input type="text" name="form-0-pub_date">'
"%s</li>" % order_html
),
)
def test_empty_ordered_fields(self):
"""
Ordering fields are allowed to be left blank. If they are left blank,
they'll be sorted below everything else.
"""
data = {
"choices-TOTAL_FORMS": "4", # the number of forms rendered
"choices-INITIAL_FORMS": "3", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-0-ORDER": "1",
"choices-1-choice": "Fergie",
"choices-1-votes": "900",
"choices-1-ORDER": "2",
"choices-2-choice": "The Decemberists",
"choices-2-votes": "500",
"choices-2-ORDER": "",
"choices-3-choice": "Basia Bulat",
"choices-3-votes": "50",
"choices-3-ORDER": "",
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{"votes": 100, "ORDER": 1, "choice": "Calexico"},
{"votes": 900, "ORDER": 2, "choice": "Fergie"},
{"votes": 500, "ORDER": None, "choice": "The Decemberists"},
{"votes": 50, "ORDER": None, "choice": "Basia Bulat"},
],
)
def test_ordering_blank_fieldsets(self):
"""Ordering works with blank fieldsets."""
data = {
"choices-TOTAL_FORMS": "3", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_formset_with_ordering_and_deletion(self):
"""FormSets with ordering + deletion."""
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{"choice": "Calexico", "votes": 100},
{"choice": "Fergie", "votes": 900},
{"choice": "The Decemberists", "votes": 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix="choices")
self.assertHTMLEqual(
"\n".join(form.as_ul() for form in formset.forms),
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico">'
"</li>"
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
'<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>'
'<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>'
'<li>Choice: <input type="text" name="choices-1-choice" value="Fergie">'
"</li>"
'<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>'
'<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>'
'<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>'
'<li>Choice: <input type="text" name="choices-2-choice" '
'value="The Decemberists"></li>'
'<li>Votes: <input type="number" name="choices-2-votes" value="500"></li>'
'<li>Order: <input type="number" name="choices-2-ORDER" value="3"></li>'
'<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>'
'<li>Choice: <input type="text" name="choices-3-choice"></li>'
'<li>Votes: <input type="number" name="choices-3-votes"></li>'
'<li>Order: <input type="number" name="choices-3-ORDER"></li>'
'<li>Delete: <input type="checkbox" name="choices-3-DELETE"></li>',
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
"choices-TOTAL_FORMS": "4", # the number of forms rendered
"choices-INITIAL_FORMS": "3", # the number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
"choices-0-ORDER": "1",
"choices-0-DELETE": "",
"choices-1-choice": "Fergie",
"choices-1-votes": "900",
"choices-1-ORDER": "2",
"choices-1-DELETE": "on",
"choices-2-choice": "The Decemberists",
"choices-2-votes": "500",
"choices-2-ORDER": "0",
"choices-2-DELETE": "",
"choices-3-choice": "",
"choices-3-votes": "",
"choices-3-ORDER": "",
"choices-3-DELETE": "",
}
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{
"votes": 500,
"DELETE": False,
"ORDER": 0,
"choice": "The Decemberists",
},
{"votes": 100, "DELETE": False, "ORDER": 1, "choice": "Calexico"},
],
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{"votes": 900, "DELETE": True, "ORDER": 2, "choice": "Fergie"}],
)
def test_invalid_deleted_form_with_ordering(self):
"""
Can get ordered_forms from a valid formset even if a deleted form
would have been invalid.
"""
FavoriteDrinkFormset = formset_factory(
form=FavoriteDrinkForm, can_delete=True, can_order=True
)
formset = FavoriteDrinkFormset(
{
"form-0-name": "",
"form-0-DELETE": "on", # no name!
"form-TOTAL_FORMS": 1,
"form-INITIAL_FORMS": 1,
"form-MIN_NUM_FORMS": 0,
"form-MAX_NUM_FORMS": 1,
}
)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_clean_hook(self):
"""
FormSets have a clean() hook for doing extra validation that isn't tied
to any form. It follows the same pattern as the clean() hook on Forms.
"""
# Start out with some duplicate data.
data = {
"drinks-TOTAL_FORMS": "2", # the number of forms rendered
"drinks-INITIAL_FORMS": "0", # the number of forms with initial data
"drinks-MIN_NUM_FORMS": "0", # min number of forms
"drinks-MAX_NUM_FORMS": "0", # max number of forms
"drinks-0-name": "Gin and Tonic",
"drinks-1-name": "Gin and Tonic",
}
formset = FavoriteDrinksFormSet(data, prefix="drinks")
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), "You may only specify a drink once.")
# The valid case still works.
data["drinks-1-name"] = "Bloody Mary"
formset = FavoriteDrinksFormSet(data, prefix="drinks")
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
"""Limiting the maximum number of forms with max_num."""
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""<div><label for="id_form-0-name">Name:</label>
<input type="text" name="form-0-name" id="id_form-0-name"></div>
<div><label for="id_form-1-name">Name:</label>
<input type="text" name="form-1-name" id="id_form-1-name"></div>
<div><label for="id_form-2-name">Name:</label>
<input type="text" name="form-2-name" id="id_form-2-name"></div>""",
)
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=3, max_num=0
)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_limited_max_forms_two(self):
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=5, max_num=2
)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""<div><label for="id_form-0-name">Name:</label>
<input type="text" name="form-0-name" id="id_form-0-name"></div>
<div><label for="id_form-1-name">Name:</label>
<input type="text" name="form-1-name" id="id_form-1-name"></div>""",
)
def test_limiting_extra_less_than_max_num(self):
"""max_num has no effect when extra is less than max_num."""
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=1, max_num=2
)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""<div><label for="id_form-0-name">Name:</label>
<input type="text" name="form-0-name" id="id_form-0-name"></div>""",
)
def test_max_num_with_initial_data(self):
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=[{"name": "Fernet and Coke"}])
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""
<div><label for="id_form-0-name">Name:</label>
<input type="text" name="form-0-name" value="Fernet and Coke"
id="id_form-0-name"></div>
<div><label for="id_form-1-name">Name:</label>
<input type="text" name="form-1-name" id="id_form-1-name"></div>
""",
)
def test_max_num_zero(self):
"""
If max_num is 0 then no form is rendered at all, regardless of extra,
unless initial data is present.
"""
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=1, max_num=0
)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_max_num_zero_with_initial(self):
# initial trumps max_num
initial = [
{"name": "Fernet and Coke"},
{"name": "Bloody Mary"},
]
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=1, max_num=0
)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""
<div><label for="id_form-0-name">Name:</label>
<input id="id_form-0-name" name="form-0-name" type="text"
value="Fernet and Coke"></div>
<div><label for="id_form-1-name">Name:</label>
<input id="id_form-1-name" name="form-1-name" type="text"
value="Bloody Mary"></div>
""",
)
def test_more_initial_than_max_num(self):
"""
More initial forms than max_num results in all initial forms being
displayed (but no extra forms).
"""
initial = [
{"name": "Gin Tonic"},
{"name": "Bloody Mary"},
{"name": "Jack and Coke"},
]
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=1, max_num=2
)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""
<div><label for="id_form-0-name">Name:</label>
<input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic">
</div>
<div><label for="id_form-1-name">Name:</label>
<input id="id_form-1-name" name="form-1-name" type="text"
value="Bloody Mary"></div>
<div><label for="id_form-2-name">Name:</label>
<input id="id_form-2-name" name="form-2-name" type="text"
value="Jack and Coke"></div>
""",
)
def test_default_absolute_max(self):
# absolute_max defaults to 2 * DEFAULT_MAX_NUM if max_num is None.
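# Worked out for this test: absolute_max == 2 * 1000 == 2000, so the 2001
# submitted forms exceed the hard cap; at most 2000 forms are instantiated
# and the error message reports the max_num limit of 1000.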
data = {
"form-TOTAL_FORMS": 2001,
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = FavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 1000 forms."],
)
self.assertEqual(formset.absolute_max, 2000)
def test_absolute_max(self):
data = {
"form-TOTAL_FORMS": "2001",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
AbsoluteMaxFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
absolute_max=3000,
)
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), True)
self.assertEqual(len(formset.forms), 2001)
# absolute_max provides a hard limit.
data["form-TOTAL_FORMS"] = "3001"
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 3000)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 1000 forms."],
)
def test_absolute_max_with_max_num(self):
data = {
"form-TOTAL_FORMS": "1001",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
LimitedFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
max_num=30,
absolute_max=1000,
)
formset = LimitedFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1000)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 30 forms."],
)
def test_absolute_max_invalid(self):
msg = "'absolute_max' must be greater or equal to 'max_num'."
for max_num in [None, 31]:
with self.subTest(max_num=max_num):
with self.assertRaisesMessage(ValueError, msg):
formset_factory(FavoriteDrinkForm, max_num=max_num, absolute_max=30)
def test_more_initial_form_result_in_one(self):
"""
One initial form plus extra=3 with max_num=2 results in one initial form
and one extra form.
"""
LimitedFavoriteDrinkFormSet = formset_factory(
FavoriteDrinkForm, extra=3, max_num=2
)
formset = LimitedFavoriteDrinkFormSet(initial=[{"name": "Gin Tonic"}])
self.assertHTMLEqual(
"\n".join(str(form) for form in formset.forms),
"""
<div><label for="id_form-0-name">Name:</label>
<input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name">
</div>
<div><label for="id_form-1-name">Name:</label>
<input type="text" name="form-1-name" id="id_form-1-name"></div>""",
)
def test_management_form_field_names(self):
"""The management form class has field names matching the constants."""
self.assertCountEqual(
ManagementForm.base_fields,
[
TOTAL_FORM_COUNT,
INITIAL_FORM_COUNT,
MIN_NUM_FORM_COUNT,
MAX_NUM_FORM_COUNT,
],
)
def test_management_form_prefix(self):
"""The management form has the correct prefix."""
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, "form")
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, "form")
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, "form")
def test_non_form_errors(self):
data = {
"drinks-TOTAL_FORMS": "2", # the number of forms rendered
"drinks-INITIAL_FORMS": "0", # the number of forms with initial data
"drinks-MIN_NUM_FORMS": "0", # min number of forms
"drinks-MAX_NUM_FORMS": "0", # max number of forms
"drinks-0-name": "Gin and Tonic",
"drinks-1-name": "Gin and Tonic",
}
formset = FavoriteDrinksFormSet(data, prefix="drinks")
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(), ["You may only specify a drink once."]
)
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>'
"You may only specify a drink once.</li></ul>",
)
def test_formset_iteration(self):
"""Formset instances are iterable."""
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# An iterated formset yields formset.forms.
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# A formset may be indexed to retrieve its forms.
self.assertEqual(formset[0], forms[0])
with self.assertRaises(IndexError):
formset[3]
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super().__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# __iter__() modifies the rendering order.
# Compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""A formsets without any forms evaluates as True."""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset works with SplitDateTimeField(initial=datetime.datetime.now).
"""
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-when_0": "1904-06-16",
"form-0-when_1": "15:51:33",
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
"""Formset's forms use the formset's error_class."""
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
"""Formsets call is_valid() on each form."""
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super().is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
"choices-TOTAL_FORMS": "1", # number of forms rendered
"choices-INITIAL_FORMS": "0", # number of forms with initial data
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "0", # max number of forms
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
"choices-TOTAL_FORMS": "4",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "4",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
"choices-2-choice": "Two",
"choices-2-votes": "2",
"choices-3-choice": "Three",
"choices-3-votes": "3",
},
prefix="choices",
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
"choices-TOTAL_FORMS": "4",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0", # min number of forms
"choices-MAX_NUM_FORMS": "4",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
"choices-2-choice": "Two",
"choices-2-votes": "2",
"choices-3-choice": "Three",
"choices-3-votes": "3",
},
prefix="choices",
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
"""
If non_form_errors() is called without calling is_valid() first,
it should ensure that full_clean() is called.
"""
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
data = {
"choices-TOTAL_FORMS": "1",
"choices-INITIAL_FORMS": "0",
}
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()), ["This is a non-form error"])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
"check-TOTAL_FORMS": "2",
"check-INITIAL_FORMS": "0",
"check-MAX_NUM_FORMS": "1",
"check-0-field": "200",
"check-0-DELETE": "",
"check-1-field": "50",
"check-1-DELETE": "on",
}
CheckFormSet = formset_factory(
CheckForm, max_num=1, validate_max=True, can_delete=True
)
formset = CheckFormSet(data, prefix="check")
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([("Calexico", "100")], 0),
([("Calexico", "")], 1),
([("", "invalid")], 2),
([("Calexico", "100"), ("Calexico", "")], 1),
([("Calexico", ""), ("Calexico", "")], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
"choices-TOTAL_FORMS": "2", # the number of forms rendered
"choices-INITIAL_FORMS": "0", # the number of forms with initial data
"choices-MAX_NUM_FORMS": "2", # max number of forms - should be ignored
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertEqual(formset.total_error_count(), 1)
data["choices-1-votes"] = ""
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, "__html__"))
self.assertEqual(str(formset), formset.__html__())
def test_can_delete_extra_formset_forms(self):
ChoiceFormFormset = formset_factory(form=Choice, can_delete=True, extra=2)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertIn("DELETE", formset.forms[0].fields)
self.assertIn("DELETE", formset.forms[1].fields)
def test_disable_delete_extra_formset_forms(self):
ChoiceFormFormset = formset_factory(
form=Choice,
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertNotIn("DELETE", formset.forms[0].fields)
self.assertNotIn("DELETE", formset.forms[1].fields)
formset = ChoiceFormFormset(initial=[{"choice": "Zero", "votes": "1"}])
self.assertEqual(len(formset), 3)
self.assertIn("DELETE", formset.forms[0].fields)
self.assertNotIn("DELETE", formset.forms[1].fields)
self.assertNotIn("DELETE", formset.forms[2].fields)
formset = ChoiceFormFormset(
data={
"form-0-choice": "Zero",
"form-0-votes": "0",
"form-0-DELETE": "on",
"form-1-choice": "One",
"form-1-votes": "1",
"form-2-choice": "",
"form-2-votes": "",
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "1",
},
initial=[{"choice": "Zero", "votes": "1"}],
)
self.assertEqual(
formset.cleaned_data,
[
{"choice": "Zero", "votes": 0, "DELETE": True},
{"choice": "One", "votes": 1},
{},
],
)
self.assertIs(formset._should_delete_form(formset.forms[0]), True)
self.assertIs(formset._should_delete_form(formset.forms[1]), False)
self.assertIs(formset._should_delete_form(formset.forms[2]), False)
def test_template_name_uses_renderer_value(self):
class CustomRenderer(TemplatesSetting):
formset_template_name = "a/custom/formset/template.html"
ChoiceFormSet = formset_factory(Choice, renderer=CustomRenderer)
self.assertEqual(
ChoiceFormSet().template_name, "a/custom/formset/template.html"
)
def test_template_name_can_be_overridden(self):
class CustomFormSet(BaseFormSet):
template_name = "a/custom/formset/template.html"
ChoiceFormSet = formset_factory(Choice, formset=CustomFormSet)
self.assertEqual(
ChoiceFormSet().template_name, "a/custom/formset/template.html"
)
def test_custom_renderer(self):
"""
A custom renderer passed to a formset_factory() is passed to all forms
and ErrorList.
"""
from django.forms.renderers import Jinja2
renderer = Jinja2()
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-0-choice": "Zero",
"choices-0-votes": "",
"choices-1-choice": "One",
"choices-1-votes": "",
}
ChoiceFormSet = formset_factory(Choice, renderer=renderer)
formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertEqual(formset.renderer, renderer)
self.assertEqual(formset.forms[0].renderer, renderer)
self.assertEqual(formset.management_form.renderer, renderer)
self.assertEqual(formset.non_form_errors().renderer, renderer)
self.assertEqual(formset.empty_form.renderer, renderer)
def test_repr(self):
valid_formset = self.make_choiceformset([("test", 1)])
valid_formset.full_clean()
invalid_formset = self.make_choiceformset([("test", "")])
invalid_formset.full_clean()
partially_invalid_formset = self.make_choiceformset(
[("test", "1"), ("test", "")],
)
partially_invalid_formset.full_clean()
invalid_formset_non_form_errors_only = self.make_choiceformset(
[("test", "")],
formset_class=ChoiceFormsetWithNonFormError,
)
invalid_formset_non_form_errors_only.full_clean()
cases = [
(
self.make_choiceformset(),
"<ChoiceFormSet: bound=False valid=Unknown total_forms=1>",
),
(
self.make_choiceformset(
formset_class=formset_factory(Choice, extra=10),
),
"<ChoiceFormSet: bound=False valid=Unknown total_forms=10>",
),
(
self.make_choiceformset([]),
"<ChoiceFormSet: bound=True valid=Unknown total_forms=0>",
),
(
self.make_choiceformset([("test", 1)]),
"<ChoiceFormSet: bound=True valid=Unknown total_forms=1>",
),
(valid_formset, "<ChoiceFormSet: bound=True valid=True total_forms=1>"),
(invalid_formset, "<ChoiceFormSet: bound=True valid=False total_forms=1>"),
(
partially_invalid_formset,
"<ChoiceFormSet: bound=True valid=False total_forms=2>",
),
(
invalid_formset_non_form_errors_only,
"<ChoiceFormsetWithNonFormError: bound=True valid=False total_forms=1>",
),
]
for formset, expected_repr in cases:
with self.subTest(expected_repr=expected_repr):
self.assertEqual(repr(formset), expected_repr)
def test_repr_do_not_trigger_validation(self):
formset = self.make_choiceformset([("test", 1)])
with mock.patch.object(formset, "full_clean") as mocked_full_clean:
repr(formset)
mocked_full_clean.assert_not_called()
formset.is_valid()
mocked_full_clean.assert_called()
@jinja2_tests
class Jinja2FormsFormsetTestCase(FormsFormsetTestCase):
pass
class FormsetAsTagTests(SimpleTestCase):
def setUp(self):
data = {
"choices-TOTAL_FORMS": "1",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-MAX_NUM_FORMS": "0",
"choices-0-choice": "Calexico",
"choices-0-votes": "100",
}
self.formset = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.management_form_html = (
'<input type="hidden" name="choices-TOTAL_FORMS" value="1">'
'<input type="hidden" name="choices-INITIAL_FORMS" value="0">'
'<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">'
'<input type="hidden" name="choices-MAX_NUM_FORMS" value="0">'
)
def test_as_table(self):
self.assertHTMLEqual(
self.formset.as_table(),
self.management_form_html
+ (
"<tr><th>Choice:</th><td>"
'<input type="text" name="choices-0-choice" value="Calexico"></td></tr>'
"<tr><th>Votes:</th><td>"
'<input type="number" name="choices-0-votes" value="100"></td></tr>'
),
)
def test_as_p(self):
self.assertHTMLEqual(
self.formset.as_p(),
self.management_form_html
+ (
"<p>Choice: "
'<input type="text" name="choices-0-choice" value="Calexico"></p>'
'<p>Votes: <input type="number" name="choices-0-votes" value="100"></p>'
),
)
def test_as_ul(self):
self.assertHTMLEqual(
self.formset.as_ul(),
self.management_form_html
+ (
"<li>Choice: "
'<input type="text" name="choices-0-choice" value="Calexico"></li>'
"<li>Votes: "
'<input type="number" name="choices-0-votes" value="100"></li>'
),
)
def test_as_div(self):
self.assertHTMLEqual(
self.formset.as_div(),
self.management_form_html
+ (
"<div>Choice: "
'<input type="text" name="choices-0-choice" value="Calexico"></div>'
'<div>Votes: <input type="number" name="choices-0-votes" value="100">'
"</div>"
),
)
@jinja2_tests
class Jinja2FormsetAsTagTests(FormsetAsTagTests):
pass
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
def test_no_data_error(self):
formset = ArticleFormSet({})
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
"ManagementForm data is missing or has been tampered with. "
"Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. "
"You may need to file a bug report if the issue persists.",
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertHTMLEqual(
str(formset),
'<ul class="errorlist nonfield">'
"<li>(Hidden field TOTAL_FORMS) This field is required.</li>"
"<li>(Hidden field INITIAL_FORMS) This field is required.</li>"
"</ul>"
"<div>"
'<input type="hidden" name="form-TOTAL_FORMS" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
"</div>\n",
)
def test_management_form_invalid_data(self):
data = {
"form-TOTAL_FORMS": "two",
"form-INITIAL_FORMS": "one",
}
formset = ArticleFormSet(data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
"ManagementForm data is missing or has been tampered with. "
"Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. "
"You may need to file a bug report if the issue persists.",
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertHTMLEqual(
str(formset),
'<ul class="errorlist nonfield">'
"<li>(Hidden field TOTAL_FORMS) Enter a whole number.</li>"
"<li>(Hidden field INITIAL_FORMS) Enter a whole number.</li>"
"</ul>"
"<div>"
'<input type="hidden" name="form-TOTAL_FORMS" value="two" '
'id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" value="one" '
'id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
"</div>\n",
)
def test_customize_management_form_error(self):
formset = ArticleFormSet(
{}, error_messages={"missing_management_form": "customized"}
)
self.assertIs(formset.is_valid(), False)
self.assertEqual(formset.non_form_errors(), ["customized"])
self.assertEqual(formset.errors, [])
def test_with_management_data_attrs_work_fine(self):
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-title": "Test",
"form-0-pub_date": "1904-06-16",
"form-1-title": "Test",
"form-1-pub_date": "", # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(
[{}, {"pub_date": ["This field is required."]}], formset.errors
)
def test_empty_forms_are_unbound(self):
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-title": "Test",
"form-0-pub_date": "1904-06-16",
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [unbound_formset.empty_form, bound_formset.empty_form]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
@jinja2_tests
class Jinja2TestIsBoundBehavior(TestIsBoundBehavior):
pass
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
"""An empty formset still calls clean()"""
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
EmptyFsetWontValidateFormset = formset_factory(
FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate
)
formset = EmptyFsetWontValidateFormset(
data={"form-INITIAL_FORMS": "0", "form-TOTAL_FORMS": "0"},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={
"form-INITIAL_FORMS": "0",
"form-TOTAL_FORMS": "1",
"form-0-name": "bah",
},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Media is available on empty formset."""
class MediaForm(Form):
class Media:
js = ("some-file.js",)
self.assertIn("some-file.js", str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""is_multipart() works with an empty formset."""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
class AllValidTests(SimpleTestCase):
def test_valid(self):
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-0-choice": "Zero",
"choices-0-votes": "0",
"choices-1-choice": "One",
"choices-1-votes": "1",
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix="choices")
formset2 = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertIs(all_valid((formset1, formset2)), True)
expected_errors = [{}, {}]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
def test_invalid(self):
"""all_valid() validates all forms, even when some are invalid."""
data = {
"choices-TOTAL_FORMS": "2",
"choices-INITIAL_FORMS": "0",
"choices-MIN_NUM_FORMS": "0",
"choices-0-choice": "Zero",
"choices-0-votes": "",
"choices-1-choice": "One",
"choices-1-votes": "",
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix="choices")
formset2 = ChoiceFormSet(data, auto_id=False, prefix="choices")
self.assertIs(all_valid((formset1, formset2)), False)
expected_errors = [
{"votes": ["This field is required."]},
{"votes": ["This field is required."]},
]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
class DeprecationTests(SimpleTestCase):
def test_warning(self):
from django.forms.utils import DEFAULT_TEMPLATE_DEPRECATION_MSG
with isolate_lru_cache(get_default_renderer), self.settings(
FORM_RENDERER="django.forms.renderers.DjangoTemplates"
), self.assertRaisesMessage(
RemovedInDjango50Warning, DEFAULT_TEMPLATE_DEPRECATION_MSG
):
ChoiceFormSet = formset_factory(Choice)
formset = ChoiceFormSet()
str(formset)
def test_no_management_form_warning(self):
"""
Management forms are already rendered with the new div template.
"""
with isolate_lru_cache(get_default_renderer), self.settings(
FORM_RENDERER="django.forms.renderers.DjangoTemplates"
):
ChoiceFormSet = formset_factory(Choice, formset=BaseFormSet)
formset = ChoiceFormSet()
str(formset.management_form)
a67327f0d5c93087ee7851336d5a7e3aa5849bfe36b8743f4ced03e99dc7425f
import copy
import datetime
import functools
import inspect
import warnings
from collections import defaultdict
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(); the '&' and '|' operators are reserved for boolean
# operator usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
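# e.g. 2 + F("votes") reaches _combine() via __radd__() with reversed=True:
# the plain 2 is wrapped in Value(2) and becomes the left-hand side, giving
# CombinedExpression(Value(2), "+", F("votes")).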
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
# Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered SQL parameter placeholders to
be replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression.
As a guess, if the output fields of all source fields match then simply
infer the same type here.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
# This guess is mostly a bad idea, but there is quite a lot of code
# (especially 3rd party Func subclasses) that depends on it; we'd need a
# deprecation path to fix it.
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
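# Illustrative (sketch): for a resolved expression `expr` with
# output_field=DecimalField(), on a backend that returns strings:
#
#   converter = expr.convert_value
#   converter("42.50", expr, connection)  # -> Decimal("42.50")
#   converter(None, expr, connection)     # -> None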
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def replace_references(self, references_map):
clone = self.copy()
clone.set_source_expressions(
[
expr.replace_references(references_map)
for expr in self.get_source_expressions()
]
)
return clone
def copy(self):
return copy.copy(self)
def prefix_references(self, prefix):
clone = self.copy()
clone.set_source_expressions(
[
F(f"{prefix}{expr.name}")
if isinstance(expr, F)
else expr.prefix_references(prefix)
for expr in self.get_source_expressions()
]
)
return clone
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
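# e.g. (F("a") + F("b")).flatten() yields the CombinedExpression itself,
# then F("a"), then F("b"), depth-first.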
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
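# e.g. Value(1) == Value(1) and hash(Value(1)) == hash(Value(1)), since both
# instances build the same identity tuple from their constructor arguments.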
# Type inference for CombinedExpression.output_field.
# Missing items will result in FieldError, by design.
#
# The current approach for NULL is based on lowest common denominator
# behavior, i.e. if one of the supported databases raises an error (rather
# than returning NULL) for `val <op> NULL`, then Django raises FieldError.
NoneType = type(None)
_connector_combinations = [
# Numeric operations - operands of same type.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.FloatField, fields.FloatField, fields.FloatField),
(fields.DecimalField, fields.DecimalField, fields.DecimalField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
# Behavior for DIV with integer arguments follows Postgres/SQLite,
# not MySQL/Oracle.
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
},
# Numeric operations - operands of different type.
{
connector: [
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
)
},
# Bitwise operators.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
]
for connector in (
Combinable.BITAND,
Combinable.BITOR,
Combinable.BITLEFTSHIFT,
Combinable.BITRIGHTSHIFT,
Combinable.BITXOR,
)
},
# Numeric with NULL.
{
connector: [
(field_type, NoneType, field_type),
(NoneType, field_type, field_type),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField)
},
# Date/DateTimeField/DurationField/TimeField.
{
Combinable.ADD: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DurationField, fields.DateField, fields.DateTimeField),
(fields.DurationField, fields.DateTimeField, fields.DateTimeField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.DurationField, fields.TimeField, fields.TimeField),
],
},
{
Combinable.SUB: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DateField, fields.DateField, fields.DurationField),
(fields.DateField, fields.DateTimeField, fields.DurationField),
(fields.DateTimeField, fields.DateField, fields.DurationField),
(fields.DateTimeField, fields.DateTimeField, fields.DurationField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.TimeField, fields.TimeField, fields.DurationField),
],
},
]
_connector_combinators = defaultdict(list)
def register_combinable_fields(lhs, connector, rhs, result):
"""
Register combinable types:
lhs <connector> rhs -> result
e.g.
register_combinable_fields(
IntegerField, Combinable.ADD, FloatField, FloatField
)
"""
_connector_combinators[connector].append((lhs, rhs, result))
for d in _connector_combinations:
for connector, field_types in d.items():
for lhs, rhs, result in field_types:
register_combinable_fields(lhs, connector, rhs, result)
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
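# e.g., per the registrations above:
#   _resolve_combined_type(Combinable.ADD, fields.IntegerField, fields.FloatField)
#   # -> fields.FloatField
# An unregistered combination falls through and returns None, which
# CombinedExpression._resolve_output_field() turns into a FieldError.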
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
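# Editor's sketch (illustrative): combining F() objects yields a
# CombinedExpression whose output_field is inferred lazily through
# _resolve_combined_type(); SomeModel is a hypothetical model with integer
# fields "price" and "discount":
#
#     expr = F("price") - F("discount")          # CombinedExpression
#     SomeModel.objects.annotate(net=expr)
#
# Combinations with no registered result type raise FieldError asking for an
# explicit output_field (see _resolve_output_field() above).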
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
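# Editor's sketch (illustrative): subtracting two temporal columns of the same
# type is rewritten by CombinedExpression.resolve_expression() into
# TemporalSubtraction, producing a DurationField; Event is a hypothetical
# model with DateTimeField columns "start" and "end":
#
#     Event.objects.annotate(length=F("end") - F("start"))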
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_references(self, references_map):
return references_map.get(self.name, self)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
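# Editor's sketch (illustrative): F() is a lazy reference to a field or
# annotation, resolved against the query at compile time; Account is a
# hypothetical model with integer fields "balance" and "credit_limit":
#
#     Account.objects.filter(balance__gte=F("credit_limit"))
#     Account.objects.order_by(F("balance").desc())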
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
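# Editor's sketch (illustrative): OuterRef stays unresolved until the queryset
# containing it is embedded as a subquery, at which point it becomes a
# ResolvedOuterRef pointing into the outer query; Post/Comment are
# hypothetical models where Comment has a ForeignKey "post":
#
#     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
#     Post.objects.annotate(newest_body=Subquery(newest.values("body")[:1]))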
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
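# Editor's sketch (illustrative): Func subclasses typically only set
# `function` (and optionally `arity`, `template`, or `arg_joiner`); Round2 and
# SomeModel are hypothetical:
#
#     class Round2(Func):
#         function = "ROUND"
#         arity = 2
#
#     SomeModel.objects.annotate(rounded=Round2("price", Value(2)))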
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
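# Editor's sketch (illustrative): Value infers its output_field from the
# wrapped Python value when none is given:
#
#     Value("abc").output_field       # CharField()
#     Value(3.14).output_field        # FloatField()
#     Value(b"\x00").output_field     # BinaryField()
#
# For unhandled types, _resolve_output_field() returns None and accessing
# output_field raises FieldError.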
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parents fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
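# Editor's sketch (illustrative): RawSQL embeds raw SQL with %s placeholders;
# params are passed separately so the backend escapes them; SomeModel is
# hypothetical:
#
#     SomeModel.objects.annotate(
#         over_100=RawSQL("price > %s", (100,), output_field=fields.BooleanField())
#     )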
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
    Reference to a column alias of the query. For example, Ref('sum_cost') in
    the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params["condition"] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
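# Editor's sketch (illustrative): When/Case compose the searched CASE shown in
# the docstring above; SomeModel is a hypothetical model with an IntegerField
# "n":
#
#     SomeModel.objects.annotate(
#         sign=Case(
#             When(n__gt=0, then=Value("positive")),
#             When(n__lt=0, then=Value("negative")),
#             default=Value("zero"),
#         )
#     )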
class Subquery(BaseExpression, Combinable):
"""
    An explicit subquery. It may contain OuterRef() references to the outer
    query, which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
# If this expression is referenced by an alias for an explicit GROUP BY
        # through values(), a reference to this expression and not the
# underlying .query must be returned to ensure external column
# references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
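# Editor's sketch (illustrative): Subquery clones the queryset's underlying
# sql.Query and marks it as a subquery; its output_field defaults to that of
# the selected column; Post/Comment are hypothetical models:
#
#     newest = Comment.objects.filter(post=OuterRef("pk")).values("email")
#     Post.objects.annotate(newest_commenter_email=Subquery(newest[:1]))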
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
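# Editor's sketch (illustrative): Exists wraps a queryset in EXISTS(...) and
# supports negation via ~; as_sql() calls Query.exists() to trim the subquery
# down to a cheap SELECT; Post/Comment are hypothetical models:
#
#     Post.objects.filter(Exists(Comment.objects.filter(post=OuterRef("pk"))))
#     Post.objects.filter(~Exists(Comment.objects.filter(post=OuterRef("pk"))))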
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
if nulls_first is False or nulls_last is False:
# When the deprecation ends, replace with:
# raise ValueError(
# "nulls_first and nulls_last values must be True or None."
# )
warnings.warn(
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first:
self.nulls_last = True
self.nulls_first = None
elif self.nulls_last:
self.nulls_first = True
self.nulls_last = None
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
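# Editor's sketch (illustrative): OrderBy is usually built via F().asc() or
# F().desc(); nulls_first/nulls_last map onto NULLS FIRST/LAST or the IS NULL
# emulation above, depending on backend support; SomeModel is hypothetical:
#
#     SomeModel.objects.order_by(F("last_seen").desc(nulls_last=True))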
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], ()
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params += tuple(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params += tuple(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params += tuple(frame_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
(*params, *window_params),
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
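# Editor's sketch (illustrative): Window combines an expression, PARTITION BY,
# ORDER BY, and an optional frame into an OVER clause; Salary is a
# hypothetical model and Rank comes from django.db.models.functions:
#
#     Salary.objects.annotate(
#         dept_rank=Window(
#             expression=Rank(),
#             partition_by=F("department"),
#             order_by=F("amount").desc(),
#         )
#     )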
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of frame
    clauses, implemented as subclasses; however, all processing and validation
    (by no means intended to be complete) is done here. Thus, providing an end
    for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the
    last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
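# Editor's sketch (illustrative): RowRange/ValueRange bound the window frame;
# a negative start is N PRECEDING, 0 is CURRENT ROW, and None is UNBOUNDED;
# "amount" and "timestamp" are hypothetical fields:
#
#     Window(
#         expression=Sum("amount"),
#         order_by=F("timestamp").asc(),
#         frame=RowRange(start=-3, end=0),  # 3 preceding rows plus the current row
#     )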
|
13eb4e9cfda957547e6b9087f3c49fc316927129e16bb0d70772bd83d2a2378d | from enum import Enum
from django.core.exceptions import FieldError, ValidationError
from django.db import connections
from django.db.models.expressions import Exists, ExpressionList, F
from django.db.models.indexes import IndexExpression
from django.db.models.lookups import Exact
from django.db.models.query_utils import Q
from django.db.models.sql.query import Query
from django.db.utils import DEFAULT_DB_ALIAS
from django.utils.translation import gettext_lazy as _
__all__ = ["BaseConstraint", "CheckConstraint", "Deferrable", "UniqueConstraint"]
class BaseConstraint:
default_violation_error_message = _("Constraint “%(name)s” is violated.")
violation_error_message = None
def __init__(self, name, violation_error_message=None):
self.name = name
if violation_error_message is not None:
self.violation_error_message = violation_error_message
else:
self.violation_error_message = self.default_violation_error_message
@property
def contains_expressions(self):
return False
def constraint_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def create_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def remove_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS):
raise NotImplementedError("This method must be implemented by a subclass.")
def get_violation_error_message(self):
return self.violation_error_message % {"name": self.name}
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.constraints", "django.db.models")
kwargs = {"name": self.name}
if (
self.violation_error_message is not None
and self.violation_error_message != self.default_violation_error_message
):
kwargs["violation_error_message"] = self.violation_error_message
return (path, (), kwargs)
def clone(self):
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
class CheckConstraint(BaseConstraint):
def __init__(self, *, check, name, violation_error_message=None):
self.check = check
if not getattr(check, "conditional", False):
raise TypeError(
"CheckConstraint.check must be a Q instance or boolean expression."
)
super().__init__(name, violation_error_message=violation_error_message)
def _get_check_sql(self, model, schema_editor):
query = Query(model=model, alias_cols=False)
where = query.build_where(self.check)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._check_sql(self.name, check)
def create_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._create_check_sql(model, self.name, check)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_check_sql(model, self.name)
def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS):
against = instance._get_field_value_map(meta=model._meta, exclude=exclude)
try:
if not Q(self.check).check(against, using=using):
raise ValidationError(self.get_violation_error_message())
except FieldError:
pass
def __repr__(self):
return "<%s: check=%s name=%s>" % (
self.__class__.__qualname__,
self.check,
repr(self.name),
)
def __eq__(self, other):
if isinstance(other, CheckConstraint):
return (
self.name == other.name
and self.check == other.check
and self.violation_error_message == other.violation_error_message
)
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["check"] = self.check
return path, args, kwargs
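# Editor's sketch (illustrative): CheckConstraint is declared in a model's
# Meta and enforced both by the database and by validate() during
# full_clean(); Product is a hypothetical model:
#
#     class Meta:
#         constraints = [
#             CheckConstraint(
#                 check=Q(price__gt=F("discounted_price")),
#                 name="price_gt_discounted_price",
#             ),
#         ]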
class Deferrable(Enum):
DEFERRED = "deferred"
IMMEDIATE = "immediate"
# A similar format was proposed for Python 3.10.
def __repr__(self):
return f"{self.__class__.__qualname__}.{self._name_}"
class UniqueConstraint(BaseConstraint):
def __init__(
self,
*expressions,
fields=(),
name=None,
condition=None,
deferrable=None,
include=None,
opclasses=(),
violation_error_message=None,
):
if not name:
raise ValueError("A unique constraint must be named.")
if not expressions and not fields:
raise ValueError(
"At least one field or expression is required to define a "
"unique constraint."
)
if expressions and fields:
raise ValueError(
"UniqueConstraint.fields and expressions are mutually exclusive."
)
if not isinstance(condition, (type(None), Q)):
raise ValueError("UniqueConstraint.condition must be a Q instance.")
if condition and deferrable:
raise ValueError("UniqueConstraint with conditions cannot be deferred.")
if include and deferrable:
raise ValueError("UniqueConstraint with include fields cannot be deferred.")
if opclasses and deferrable:
raise ValueError("UniqueConstraint with opclasses cannot be deferred.")
if expressions and deferrable:
raise ValueError("UniqueConstraint with expressions cannot be deferred.")
if expressions and opclasses:
raise ValueError(
"UniqueConstraint.opclasses cannot be used with expressions. "
"Use django.contrib.postgres.indexes.OpClass() instead."
)
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"UniqueConstraint.deferrable must be a Deferrable instance."
)
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("UniqueConstraint.include must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("UniqueConstraint.opclasses must be a list or tuple.")
if opclasses and len(fields) != len(opclasses):
raise ValueError(
"UniqueConstraint.fields and UniqueConstraint.opclasses must "
"have the same number of elements."
)
self.fields = tuple(fields)
self.condition = condition
self.deferrable = deferrable
self.include = tuple(include) if include else ()
self.opclasses = opclasses
self.expressions = tuple(
F(expression) if isinstance(expression, str) else expression
for expression in expressions
)
super().__init__(name, violation_error_message=violation_error_message)
@property
def contains_expressions(self):
return bool(self.expressions)
def _get_condition_sql(self, model, schema_editor):
if self.condition is None:
return None
query = Query(model=model, alias_cols=False)
where = query.build_where(self.condition)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def _get_index_expressions(self, model, schema_editor):
if not self.expressions:
return None
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
return ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
def constraint_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def create_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._create_unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def remove_sql(self, model, schema_editor):
condition = self._get_condition_sql(model, schema_editor)
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._delete_unique_sql(
model,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def __repr__(self):
return "<%s:%s%s%s%s%s%s%s>" % (
self.__class__.__qualname__,
"" if not self.fields else " fields=%s" % repr(self.fields),
"" if not self.expressions else " expressions=%s" % repr(self.expressions),
" name=%s" % repr(self.name),
"" if self.condition is None else " condition=%s" % self.condition,
"" if self.deferrable is None else " deferrable=%r" % self.deferrable,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
def __eq__(self, other):
if isinstance(other, UniqueConstraint):
return (
self.name == other.name
and self.fields == other.fields
and self.condition == other.condition
and self.deferrable == other.deferrable
and self.include == other.include
and self.opclasses == other.opclasses
and self.expressions == other.expressions
and self.violation_error_message == other.violation_error_message
)
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fields:
kwargs["fields"] = self.fields
if self.condition:
kwargs["condition"] = self.condition
if self.deferrable:
kwargs["deferrable"] = self.deferrable
if self.include:
kwargs["include"] = self.include
if self.opclasses:
kwargs["opclasses"] = self.opclasses
return path, self.expressions, kwargs
def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS):
queryset = model._default_manager.using(using)
if self.fields:
lookup_kwargs = {}
for field_name in self.fields:
if exclude and field_name in exclude:
return
field = model._meta.get_field(field_name)
lookup_value = getattr(instance, field.attname)
if lookup_value is None or (
lookup_value == ""
and connections[using].features.interprets_empty_strings_as_nulls
):
                    # A composite constraint containing a NULL value cannot
                    # cause a violation since NULL != NULL in SQL.
return
lookup_kwargs[field.name] = lookup_value
queryset = queryset.filter(**lookup_kwargs)
else:
# Ignore constraints with excluded fields.
if exclude:
for expression in self.expressions:
if hasattr(expression, "flatten"):
for expr in expression.flatten():
if isinstance(expr, F) and expr.name in exclude:
return
elif isinstance(expression, F) and expression.name in exclude:
return
replacement_map = instance._get_field_value_map(
meta=model._meta, exclude=exclude
)
expressions = [
Exact(expr, expr.replace_references(replacement_map))
for expr in self.expressions
]
queryset = queryset.filter(*expressions)
model_class_pk = instance._get_pk_val(model._meta)
if not instance._state.adding and model_class_pk is not None:
queryset = queryset.exclude(pk=model_class_pk)
if not self.condition:
if queryset.exists():
if self.expressions:
raise ValidationError(self.get_violation_error_message())
# When fields are defined, use the unique_error_message() for
# backward compatibility.
for model, constraints in instance.get_constraints():
for constraint in constraints:
if constraint is self:
raise ValidationError(
instance.unique_error_message(model, self.fields)
)
else:
against = instance._get_field_value_map(meta=model._meta, exclude=exclude)
try:
if (self.condition & Exists(queryset.filter(self.condition))).check(
against, using=using
):
raise ValidationError(self.get_violation_error_message())
except FieldError:
pass
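# Editor's sketch (illustrative): UniqueConstraint accepts field names,
# expressions, or a condition for partial uniqueness; Room is a hypothetical
# model and Lower comes from django.db.models.functions:
#
#     class Meta:
#         constraints = [
#             UniqueConstraint(fields=["number", "floor"], name="unique_room"),
#             UniqueConstraint(Lower("number"), name="unique_room_ci"),
#             UniqueConstraint(
#                 fields=["number"],
#                 condition=Q(is_active=True),
#                 name="unique_active_room",
#             ),
#         ]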
|
2f9db32649776785ec00a94f773e7c245f640b259e2ecd4984ebff6e95a88bdf | from unittest import mock
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models
from django.db.models import F
from django.db.models.constraints import BaseConstraint
from django.db.models.functions import Lower
from django.db.transaction import atomic
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import (
ChildModel,
ChildUniqueConstraintProduct,
Product,
UniqueConstraintConditionProduct,
UniqueConstraintDeferrable,
UniqueConstraintInclude,
UniqueConstraintProduct,
)
def get_constraints(table):
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
class BaseConstraintTests(SimpleTestCase):
def test_constraint_sql(self):
c = BaseConstraint("name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.constraint_sql(None, None)
def test_contains_expressions(self):
c = BaseConstraint("name")
self.assertIs(c.contains_expressions, False)
def test_create_sql(self):
c = BaseConstraint("name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.create_sql(None, None)
def test_remove_sql(self):
c = BaseConstraint("name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.remove_sql(None, None)
def test_validate(self):
c = BaseConstraint("name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.validate(None, None)
def test_default_violation_error_message(self):
c = BaseConstraint("name")
self.assertEqual(
c.get_violation_error_message(), "Constraint “name” is violated."
)
def test_custom_violation_error_message(self):
c = BaseConstraint(
"base_name", violation_error_message="custom %(name)s message"
)
self.assertEqual(c.get_violation_error_message(), "custom base_name message")
def test_custom_violation_error_message_clone(self):
constraint = BaseConstraint(
"base_name",
violation_error_message="custom %(name)s message",
).clone()
self.assertEqual(
constraint.get_violation_error_message(),
"custom base_name message",
)
def test_deconstruction(self):
constraint = BaseConstraint(
"base_name",
violation_error_message="custom %(name)s message",
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.BaseConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{"name": "base_name", "violation_error_message": "custom %(name)s message"},
)
class CheckConstraintTests(TestCase):
def test_eq(self):
check1 = models.Q(price__gt=models.F("discounted_price"))
check2 = models.Q(price__lt=models.F("discounted_price"))
self.assertEqual(
models.CheckConstraint(check=check1, name="price"),
models.CheckConstraint(check=check1, name="price"),
)
self.assertEqual(models.CheckConstraint(check=check1, name="price"), mock.ANY)
self.assertNotEqual(
models.CheckConstraint(check=check1, name="price"),
models.CheckConstraint(check=check1, name="price2"),
)
self.assertNotEqual(
models.CheckConstraint(check=check1, name="price"),
models.CheckConstraint(check=check2, name="price"),
)
self.assertNotEqual(models.CheckConstraint(check=check1, name="price"), 1)
self.assertNotEqual(
models.CheckConstraint(check=check1, name="price"),
models.CheckConstraint(
check=check1, name="price", violation_error_message="custom error"
),
)
self.assertNotEqual(
models.CheckConstraint(
check=check1, name="price", violation_error_message="custom error"
),
models.CheckConstraint(
check=check1, name="price", violation_error_message="other custom error"
),
)
self.assertEqual(
models.CheckConstraint(
check=check1, name="price", violation_error_message="custom error"
),
models.CheckConstraint(
check=check1, name="price", violation_error_message="custom error"
),
)
def test_repr(self):
constraint = models.CheckConstraint(
check=models.Q(price__gt=models.F("discounted_price")),
name="price_gt_discounted_price",
)
self.assertEqual(
repr(constraint),
"<CheckConstraint: check=(AND: ('price__gt', F(discounted_price))) "
"name='price_gt_discounted_price'>",
)
def test_invalid_check_types(self):
msg = "CheckConstraint.check must be a Q instance or boolean expression."
with self.assertRaisesMessage(TypeError, msg):
models.CheckConstraint(check=models.F("discounted_price"), name="check")
def test_deconstruction(self):
check = models.Q(price__gt=models.F("discounted_price"))
name = "price_gt_discounted_price"
constraint = models.CheckConstraint(check=check, name=name)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.CheckConstraint")
self.assertEqual(args, ())
self.assertEqual(kwargs, {"check": check, "name": name})
@skipUnlessDBFeature("supports_table_check_constraints")
def test_database_constraint(self):
Product.objects.create(price=10, discounted_price=5)
with self.assertRaises(IntegrityError):
Product.objects.create(price=10, discounted_price=20)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_database_constraint_unicode(self):
Product.objects.create(price=10, discounted_price=5, unit="μg/mL")
with self.assertRaises(IntegrityError):
Product.objects.create(price=10, discounted_price=7, unit="l")
@skipUnlessDBFeature(
"supports_table_check_constraints", "can_introspect_check_constraints"
)
def test_name(self):
constraints = get_constraints(Product._meta.db_table)
for expected_name in (
"price_gt_discounted_price",
"constraints_product_price_gt_0",
):
with self.subTest(expected_name):
self.assertIn(expected_name, constraints)
@skipUnlessDBFeature(
"supports_table_check_constraints", "can_introspect_check_constraints"
)
def test_abstract_name(self):
constraints = get_constraints(ChildModel._meta.db_table)
self.assertIn("constraints_childmodel_adult", constraints)
def test_validate(self):
check = models.Q(price__gt=models.F("discounted_price"))
constraint = models.CheckConstraint(check=check, name="price")
# Invalid product.
invalid_product = Product(price=10, discounted_price=42)
with self.assertRaises(ValidationError):
constraint.validate(Product, invalid_product)
with self.assertRaises(ValidationError):
constraint.validate(Product, invalid_product, exclude={"unit"})
# Fields used by the check constraint are excluded.
constraint.validate(Product, invalid_product, exclude={"price"})
constraint.validate(Product, invalid_product, exclude={"discounted_price"})
constraint.validate(
Product,
invalid_product,
exclude={"discounted_price", "price"},
)
# Valid product.
constraint.validate(Product, Product(price=10, discounted_price=5))
def test_validate_boolean_expressions(self):
constraint = models.CheckConstraint(
check=models.expressions.ExpressionWrapper(
models.Q(price__gt=500) | models.Q(price__lt=500),
output_field=models.BooleanField(),
),
name="price_neq_500_wrap",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(Product, Product(price=500, discounted_price=5))
constraint.validate(Product, Product(price=501, discounted_price=5))
constraint.validate(Product, Product(price=499, discounted_price=5))
def test_validate_rawsql_expressions_noop(self):
constraint = models.CheckConstraint(
check=models.expressions.RawSQL(
"price < %s OR price > %s",
(500, 500),
output_field=models.BooleanField(),
),
name="price_neq_500_raw",
)
        # RawSQL cannot be checked and is always considered valid.
constraint.validate(Product, Product(price=500, discounted_price=5))
constraint.validate(Product, Product(price=501, discounted_price=5))
constraint.validate(Product, Product(price=499, discounted_price=5))
class UniqueConstraintTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = UniqueConstraintProduct.objects.create(name="p1", color="red")
cls.p2 = UniqueConstraintProduct.objects.create(name="p2")
def test_eq(self):
self.assertEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
)
self.assertEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
mock.ANY,
)
self.assertNotEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
models.UniqueConstraint(fields=["foo", "bar"], name="unique2"),
)
self.assertNotEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
models.UniqueConstraint(fields=["foo", "baz"], name="unique"),
)
self.assertNotEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"), 1
)
self.assertNotEqual(
models.UniqueConstraint(fields=["foo", "bar"], name="unique"),
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
violation_error_message="custom error",
),
)
self.assertNotEqual(
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
violation_error_message="custom error",
),
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
violation_error_message="other custom error",
),
)
self.assertEqual(
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
violation_error_message="custom error",
),
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
violation_error_message="custom error",
),
)
def test_eq_with_condition(self):
self.assertEqual(
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
condition=models.Q(foo=models.F("bar")),
),
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
condition=models.Q(foo=models.F("bar")),
),
)
self.assertNotEqual(
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
condition=models.Q(foo=models.F("bar")),
),
models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
condition=models.Q(foo=models.F("baz")),
),
)
def test_eq_with_deferrable(self):
constraint_1 = models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
deferrable=models.Deferrable.DEFERRED,
)
constraint_2 = models.UniqueConstraint(
fields=["foo", "bar"],
name="unique",
deferrable=models.Deferrable.IMMEDIATE,
)
self.assertEqual(constraint_1, constraint_1)
self.assertNotEqual(constraint_1, constraint_2)
def test_eq_with_include(self):
constraint_1 = models.UniqueConstraint(
fields=["foo", "bar"],
name="include",
include=["baz_1"],
)
constraint_2 = models.UniqueConstraint(
fields=["foo", "bar"],
name="include",
include=["baz_2"],
)
self.assertEqual(constraint_1, constraint_1)
self.assertNotEqual(constraint_1, constraint_2)
def test_eq_with_opclasses(self):
constraint_1 = models.UniqueConstraint(
fields=["foo", "bar"],
name="opclasses",
opclasses=["text_pattern_ops", "varchar_pattern_ops"],
)
constraint_2 = models.UniqueConstraint(
fields=["foo", "bar"],
name="opclasses",
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
self.assertEqual(constraint_1, constraint_1)
self.assertNotEqual(constraint_1, constraint_2)
def test_eq_with_expressions(self):
constraint = models.UniqueConstraint(
Lower("title"),
F("author"),
name="book_func_uq",
)
same_constraint = models.UniqueConstraint(
Lower("title"),
"author",
name="book_func_uq",
)
another_constraint = models.UniqueConstraint(
Lower("title"),
name="book_func_uq",
)
self.assertEqual(constraint, same_constraint)
self.assertEqual(constraint, mock.ANY)
self.assertNotEqual(constraint, another_constraint)
def test_repr(self):
fields = ["foo", "bar"]
name = "unique_fields"
constraint = models.UniqueConstraint(fields=fields, name=name)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: fields=('foo', 'bar') name='unique_fields'>",
)
def test_repr_with_condition(self):
constraint = models.UniqueConstraint(
fields=["foo", "bar"],
name="unique_fields",
condition=models.Q(foo=models.F("bar")),
)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' "
"condition=(AND: ('foo', F(bar)))>",
)
def test_repr_with_deferrable(self):
constraint = models.UniqueConstraint(
fields=["foo", "bar"],
name="unique_fields",
deferrable=models.Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' "
"deferrable=Deferrable.IMMEDIATE>",
)
def test_repr_with_include(self):
constraint = models.UniqueConstraint(
fields=["foo", "bar"],
name="include_fields",
include=["baz_1", "baz_2"],
)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: fields=('foo', 'bar') name='include_fields' "
"include=('baz_1', 'baz_2')>",
)
def test_repr_with_opclasses(self):
constraint = models.UniqueConstraint(
fields=["foo", "bar"],
name="opclasses_fields",
opclasses=["text_pattern_ops", "varchar_pattern_ops"],
)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: fields=('foo', 'bar') name='opclasses_fields' "
"opclasses=['text_pattern_ops', 'varchar_pattern_ops']>",
)
def test_repr_with_expressions(self):
constraint = models.UniqueConstraint(
Lower("title"),
F("author"),
name="book_func_uq",
)
self.assertEqual(
repr(constraint),
"<UniqueConstraint: expressions=(Lower(F(title)), F(author)) "
"name='book_func_uq'>",
)
def test_deconstruction(self):
fields = ["foo", "bar"]
name = "unique_fields"
constraint = models.UniqueConstraint(fields=fields, name=name)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, ())
self.assertEqual(kwargs, {"fields": tuple(fields), "name": name})
def test_deconstruction_with_condition(self):
fields = ["foo", "bar"]
name = "unique_fields"
condition = models.Q(foo=models.F("bar"))
constraint = models.UniqueConstraint(
fields=fields, name=name, condition=condition
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs, {"fields": tuple(fields), "name": name, "condition": condition}
)
def test_deconstruction_with_deferrable(self):
fields = ["foo"]
name = "unique_fields"
constraint = models.UniqueConstraint(
fields=fields,
name=name,
deferrable=models.Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"fields": tuple(fields),
"name": name,
"deferrable": models.Deferrable.DEFERRED,
},
)
def test_deconstruction_with_include(self):
fields = ["foo", "bar"]
name = "unique_fields"
include = ["baz_1", "baz_2"]
constraint = models.UniqueConstraint(fields=fields, name=name, include=include)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"fields": tuple(fields),
"name": name,
"include": tuple(include),
},
)
def test_deconstruction_with_opclasses(self):
fields = ["foo", "bar"]
name = "unique_fields"
opclasses = ["varchar_pattern_ops", "text_pattern_ops"]
constraint = models.UniqueConstraint(
fields=fields, name=name, opclasses=opclasses
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"fields": tuple(fields),
"name": name,
"opclasses": opclasses,
},
)
def test_deconstruction_with_expressions(self):
name = "unique_fields"
constraint = models.UniqueConstraint(Lower("title"), name=name)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.UniqueConstraint")
self.assertEqual(args, (Lower("title"),))
self.assertEqual(kwargs, {"name": name})
def test_database_constraint(self):
with self.assertRaises(IntegrityError):
UniqueConstraintProduct.objects.create(
name=self.p1.name, color=self.p1.color
)
@skipUnlessDBFeature("supports_partial_indexes")
def test_database_constraint_with_condition(self):
UniqueConstraintConditionProduct.objects.create(name="p1")
UniqueConstraintConditionProduct.objects.create(name="p2")
with self.assertRaises(IntegrityError):
UniqueConstraintConditionProduct.objects.create(name="p1")
def test_model_validation(self):
msg = "Unique constraint product with this Name and Color already exists."
with self.assertRaisesMessage(ValidationError, msg):
UniqueConstraintProduct(
name=self.p1.name, color=self.p1.color
).validate_constraints()
@skipUnlessDBFeature("supports_partial_indexes")
def test_model_validation_with_condition(self):
"""
Partial unique constraints are not ignored by
Model.validate_constraints().
"""
obj1 = UniqueConstraintConditionProduct.objects.create(name="p1", color="red")
obj2 = UniqueConstraintConditionProduct.objects.create(name="p2")
UniqueConstraintConditionProduct(
name=obj1.name, color="blue"
).validate_constraints()
msg = "Constraint “name_without_color_uniq” is violated."
with self.assertRaisesMessage(ValidationError, msg):
UniqueConstraintConditionProduct(name=obj2.name).validate_constraints()
def test_validate(self):
constraint = UniqueConstraintProduct._meta.constraints[0]
msg = "Unique constraint product with this Name and Color already exists."
non_unique_product = UniqueConstraintProduct(
name=self.p1.name, color=self.p1.color
)
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(UniqueConstraintProduct, non_unique_product)
# Null values are ignored.
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p2.name, color=None),
)
# Existing instances have their existing row excluded.
constraint.validate(UniqueConstraintProduct, self.p1)
# Unique fields are excluded.
constraint.validate(
UniqueConstraintProduct,
non_unique_product,
exclude={"name"},
)
constraint.validate(
UniqueConstraintProduct,
non_unique_product,
exclude={"color"},
)
constraint.validate(
UniqueConstraintProduct,
non_unique_product,
exclude={"name", "color"},
)
# Validation on a child instance.
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(
UniqueConstraintProduct,
ChildUniqueConstraintProduct(name=self.p1.name, color=self.p1.color),
)
@skipUnlessDBFeature("supports_partial_indexes")
def test_validate_condition(self):
p1 = UniqueConstraintConditionProduct.objects.create(name="p1")
constraint = UniqueConstraintConditionProduct._meta.constraints[0]
msg = "Constraint “name_without_color_uniq” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(
UniqueConstraintConditionProduct,
UniqueConstraintConditionProduct(name=p1.name, color=None),
)
# Values not matching condition are ignored.
constraint.validate(
UniqueConstraintConditionProduct,
UniqueConstraintConditionProduct(name=p1.name, color="anything-but-none"),
)
# Existing instances have their existing row excluded.
constraint.validate(UniqueConstraintConditionProduct, p1)
# Unique field is excluded.
constraint.validate(
UniqueConstraintConditionProduct,
UniqueConstraintConditionProduct(name=p1.name, color=None),
exclude={"name"},
)
def test_validate_expression(self):
constraint = models.UniqueConstraint(Lower("name"), name="name_lower_uniq")
msg = "Constraint “name_lower_uniq” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p1.name.upper()),
)
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name="another-name"),
)
# Existing instances have their existing row excluded.
constraint.validate(UniqueConstraintProduct, self.p1)
# Unique field is excluded.
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p1.name.upper()),
exclude={"name"},
)
def test_validate_expression_condition(self):
constraint = models.UniqueConstraint(
Lower("name"),
name="name_lower_without_color_uniq",
condition=models.Q(color__isnull=True),
)
non_unique_product = UniqueConstraintProduct(name=self.p2.name.upper())
msg = "Constraint “name_lower_without_color_uniq” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(UniqueConstraintProduct, non_unique_product)
# Values not matching condition are ignored.
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p1.name, color=self.p1.color),
)
# Existing instances have their existing row excluded.
constraint.validate(UniqueConstraintProduct, self.p2)
# Unique field is excluded.
constraint.validate(
UniqueConstraintProduct,
non_unique_product,
exclude={"name"},
)
# Field from a condition is excluded.
constraint.validate(
UniqueConstraintProduct,
non_unique_product,
exclude={"color"},
)
def test_validate_expression_str(self):
constraint = models.UniqueConstraint("name", name="name_uniq")
msg = "Constraint “name_uniq” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p1.name),
)
constraint.validate(
UniqueConstraintProduct,
UniqueConstraintProduct(name=self.p1.name),
exclude={"name"},
)
def test_name(self):
constraints = get_constraints(UniqueConstraintProduct._meta.db_table)
expected_name = "name_color_uniq"
self.assertIn(expected_name, constraints)
def test_condition_must_be_q(self):
with self.assertRaisesMessage(
ValueError, "UniqueConstraint.condition must be a Q instance."
):
models.UniqueConstraint(name="uniq", fields=["name"], condition="invalid")
@skipUnlessDBFeature("supports_deferrable_unique_constraints")
def test_initially_deferred_database_constraint(self):
obj_1 = UniqueConstraintDeferrable.objects.create(name="p1", shelf="front")
obj_2 = UniqueConstraintDeferrable.objects.create(name="p2", shelf="back")
def swap():
obj_1.name, obj_2.name = obj_2.name, obj_1.name
obj_1.save()
obj_2.save()
swap()
# Behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with atomic(), connection.cursor() as cursor:
constraint_name = connection.ops.quote_name("name_init_deferred_uniq")
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % constraint_name)
swap()
@skipUnlessDBFeature("supports_deferrable_unique_constraints")
def test_initially_immediate_database_constraint(self):
obj_1 = UniqueConstraintDeferrable.objects.create(name="p1", shelf="front")
obj_2 = UniqueConstraintDeferrable.objects.create(name="p2", shelf="back")
obj_1.shelf, obj_2.shelf = obj_2.shelf, obj_1.shelf
with self.assertRaises(IntegrityError), atomic():
obj_1.save()
# Behavior can be changed with SET CONSTRAINTS.
with connection.cursor() as cursor:
constraint_name = connection.ops.quote_name("sheld_init_immediate_uniq")
cursor.execute("SET CONSTRAINTS %s DEFERRED" % constraint_name)
obj_1.save()
obj_2.save()
def test_deferrable_with_condition(self):
message = "UniqueConstraint with conditions cannot be deferred."
with self.assertRaisesMessage(ValueError, message):
models.UniqueConstraint(
fields=["name"],
name="name_without_color_unique",
condition=models.Q(color__isnull=True),
deferrable=models.Deferrable.DEFERRED,
)
def test_deferrable_with_include(self):
message = "UniqueConstraint with include fields cannot be deferred."
with self.assertRaisesMessage(ValueError, message):
models.UniqueConstraint(
fields=["name"],
name="name_inc_color_color_unique",
include=["color"],
deferrable=models.Deferrable.DEFERRED,
)
def test_deferrable_with_opclasses(self):
message = "UniqueConstraint with opclasses cannot be deferred."
with self.assertRaisesMessage(ValueError, message):
models.UniqueConstraint(
fields=["name"],
name="name_text_pattern_ops_unique",
opclasses=["text_pattern_ops"],
deferrable=models.Deferrable.DEFERRED,
)
def test_deferrable_with_expressions(self):
message = "UniqueConstraint with expressions cannot be deferred."
with self.assertRaisesMessage(ValueError, message):
models.UniqueConstraint(
Lower("name"),
name="deferred_expression_unique",
deferrable=models.Deferrable.DEFERRED,
)
def test_invalid_defer_argument(self):
message = "UniqueConstraint.deferrable must be a Deferrable instance."
with self.assertRaisesMessage(ValueError, message):
models.UniqueConstraint(
fields=["name"],
name="name_invalid",
deferrable="invalid",
)
@skipUnlessDBFeature(
"supports_table_check_constraints",
"supports_covering_indexes",
)
def test_include_database_constraint(self):
UniqueConstraintInclude.objects.create(name="p1", color="red")
with self.assertRaises(IntegrityError):
UniqueConstraintInclude.objects.create(name="p1", color="blue")
def test_invalid_include_argument(self):
msg = "UniqueConstraint.include must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(
name="uniq_include",
fields=["field"],
include="other",
)
def test_invalid_opclasses_argument(self):
msg = "UniqueConstraint.opclasses must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(
name="uniq_opclasses",
fields=["field"],
opclasses="jsonb_path_ops",
)
def test_opclasses_and_fields_same_length(self):
msg = (
"UniqueConstraint.fields and UniqueConstraint.opclasses must have "
"the same number of elements."
)
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(
name="uniq_opclasses",
fields=["field"],
opclasses=["foo", "bar"],
)
def test_requires_field_or_expression(self):
msg = (
"At least one field or expression is required to define a unique "
"constraint."
)
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(name="name")
def test_expressions_and_fields_mutually_exclusive(self):
msg = "UniqueConstraint.fields and expressions are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(Lower("field_1"), fields=["field_2"], name="name")
def test_expressions_with_opclasses(self):
msg = (
"UniqueConstraint.opclasses cannot be used with expressions. Use "
"django.contrib.postgres.indexes.OpClass() instead."
)
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(
Lower("field"),
name="test_func_opclass",
opclasses=["jsonb_path_ops"],
)
def test_requires_name(self):
msg = "A unique constraint must be named."
with self.assertRaisesMessage(ValueError, msg):
models.UniqueConstraint(fields=["field"])
import copy
import datetime
import functools
import inspect
import warnings
from collections import defaultdict
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
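# A hedged illustration of the mixin above: on SQLite, an expression whose
# output_field resolves to DecimalField is compiled to something like
#     CAST(("price" * "qty") AS NUMERIC)
# (column names illustrative), so decimal filtering behaves consistently
# despite SQLite's dynamic typing.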
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
    # Bitwise operators - note that these are generated by .bitand()
    # and .bitor(); the '&' and '|' are reserved for boolean operator
    # usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
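# A minimal usage sketch of Combinable (model and field names illustrative,
# not part of this module): arithmetic on F() objects builds an expression
# tree that the database evaluates per row.
#
#     from django.db.models import F
#     expr = F("foo") + F("bar") * 2        # CombinedExpression, not a value
#     Item.objects.annotate(total=expr)
#     F("flags").bitand(4)                  # & and | are reserved for Q logic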
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
    # Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
        Where `sql` is a string containing ordered sql parameter placeholders
        to be replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression.
As a guess, if the output fields of all source fields match then simply
infer the same type here.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
        # This guess is mostly a bad idea, but there is quite a lot of code
        # (especially 3rd party Func subclasses) that depends on it, so we'd
        # need a deprecation path to fix it.
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.copy()
source_expressions = clone.get_source_expressions()
clone.set_source_expressions(
[
expr.replace_expressions(replacements) if expr else None
for expr in source_expressions
]
)
return clone
def copy(self):
return copy.copy(self)
def prefix_references(self, prefix):
clone = self.copy()
clone.set_source_expressions(
[
F(f"{prefix}{expr.name}")
if isinstance(expr, F)
else expr.prefix_references(prefix)
for expr in self.get_source_expressions()
]
)
return clone
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
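# Equality in practice (sketch): identity is structural, so independently
# constructed expressions with the same arguments compare equal and hash
# identically.
#
#     from django.db.models.functions import Lower
#     Lower("name") == Lower("name")              # True
#     hash(Lower("name")) == hash(Lower("name"))  # True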
# Type inference for CombinedExpression.output_field.
# Missing items will result in FieldError, by design.
#
# The current approach for NULL is based on lowest-common-denominator
# behavior, i.e. if one of the supported databases raises an error (rather
# than returning NULL) for `val <op> NULL`, then Django raises FieldError.
NoneType = type(None)
_connector_combinations = [
# Numeric operations - operands of same type.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.FloatField, fields.FloatField, fields.FloatField),
(fields.DecimalField, fields.DecimalField, fields.DecimalField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
# Behavior for DIV with integer arguments follows Postgres/SQLite,
# not MySQL/Oracle.
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
},
# Numeric operations - operands of different type.
{
connector: [
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
)
},
# Bitwise operators.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
]
for connector in (
Combinable.BITAND,
Combinable.BITOR,
Combinable.BITLEFTSHIFT,
Combinable.BITRIGHTSHIFT,
Combinable.BITXOR,
)
},
# Numeric with NULL.
{
connector: [
(field_type, NoneType, field_type),
(NoneType, field_type, field_type),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField)
},
# Date/DateTimeField/DurationField/TimeField.
{
Combinable.ADD: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DurationField, fields.DateField, fields.DateTimeField),
(fields.DurationField, fields.DateTimeField, fields.DateTimeField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.DurationField, fields.TimeField, fields.TimeField),
],
},
{
Combinable.SUB: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DateField, fields.DateField, fields.DurationField),
(fields.DateField, fields.DateTimeField, fields.DurationField),
(fields.DateTimeField, fields.DateField, fields.DurationField),
(fields.DateTimeField, fields.DateTimeField, fields.DurationField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.TimeField, fields.TimeField, fields.DurationField),
],
},
]
_connector_combinators = defaultdict(list)
def register_combinable_fields(lhs, connector, rhs, result):
"""
Register combinable types:
lhs <connector> rhs -> result
e.g.
register_combinable_fields(
IntegerField, Combinable.ADD, FloatField, FloatField
)
"""
_connector_combinators[connector].append((lhs, rhs, result))
for d in _connector_combinations:
for connector, field_types in d.items():
for lhs, rhs, result in field_types:
register_combinable_fields(lhs, connector, rhs, result)
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
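# Consequence of the table above (sketch; field names illustrative): a pair
# absent from the table resolves to None, surfacing as FieldError unless an
# explicit output_field is supplied.
#
#     from django.db.models import ExpressionWrapper, F, FloatField
#     F("weight") / F("price")   # FloatField / DecimalField -> FieldError
#     ExpressionWrapper(F("weight") / F("price"), output_field=FloatField())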
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_expressions(self, replacements):
return replacements.get(self, self)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
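# Canonical F() usage (sketch; model names illustrative): references resolve
# against the query, enabling field-to-field comparisons and race-free,
# database-side updates.
#
#     Entry.objects.filter(n_comments__gt=F("n_pingbacks"))
#     Entry.objects.update(rating=F("rating") + 1)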
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
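# A hedged sketch of subclassing Func (assumes the backend provides a
# two-argument ROUND; the class and model names are made up):
#
#     class Round2(Func):
#         function = "ROUND"
#         arity = 2
#
#     Product.objects.annotate(price_2dp=Round2(F("price"), 2))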
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
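# Output-field inference in action (sketch):
#
#     Value("abc").output_field      # CharField instance
#     Value(3.14).output_field       # FloatField instance
#     Value(object()).output_field   # FieldError: unknown output_field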
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
        # Resolve parent fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
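# Hedged RawSQL usage (table and column names illustrative); params are
# always passed separately so values are properly escaped:
#
#     Model.objects.annotate(
#         val=RawSQL("SELECT col FROM sometable WHERE othercol = %s", (x,))
#     )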
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
    Reference to a column alias of the query. For example, Ref('sum_cost') in
    the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
def get_group_by_cols(self):
group_by_cols = []
for order_by in self.get_source_expressions():
group_by_cols.extend(order_by.get_group_by_cols())
return group_by_cols
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
# Filters that match everything are handled as empty strings in the
# WHERE clause, but in a CASE WHEN expression they must use a predicate
# that's always True.
if condition_sql == "":
if connection.features.supports_boolean_expr_in_select_clause:
condition_sql, condition_params = compiler.compile(Value(True))
else:
condition_sql, condition_params = "1=1", ()
template_params["condition"] = condition_sql
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
template = template or self.template
return template % template_params, (
*sql_params,
*condition_params,
*result_params,
)
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
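# The ORM counterpart of the SQL shown in the Case docstring (sketch; model
# name illustrative):
#
#     Sample.objects.annotate(
#         sign=Case(
#             When(n__gt=0, then=Value("positive")),
#             When(n__lt=0, then=Value("negative")),
#             default=Value("zero"),
#         )
#     )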
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
        # If this expression is referenced by an alias for an explicit GROUP BY
        # through values(), a reference to this expression (and not the
        # underlying .query) must be returned to ensure external column
        # references are not also grouped against.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
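# Hedged Subquery/OuterRef sketch (models illustrative): OuterRef("pk") stays
# unresolved until the subquery is embedded in the outer queryset.
#
#     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
#     Post.objects.annotate(newest_email=Subquery(newest.values("email")[:1]))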
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
        # Wrap EXISTS() with a CASE WHEN expression if a database backend
        # (e.g. Oracle) doesn't support boolean expressions in the SELECT or
        # GROUP BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
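# Exists usage sketch (models and `cutoff` illustrative); negation via ~ only
# flips the `negated` flag, so no new query is built:
#
#     recent = Comment.objects.filter(post=OuterRef("pk"), created__gt=cutoff)
#     Post.objects.filter(Exists(recent))
#     Post.objects.filter(~Exists(recent))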
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
if nulls_first is False or nulls_last is False:
# When the deprecation ends, replace with:
# raise ValueError(
# "nulls_first and nulls_last values must be True or None."
# )
warnings.warn(
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first:
self.nulls_last = True
self.nulls_first = None
elif self.nulls_last:
self.nulls_first = True
self.nulls_last = None
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
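# Illustrative usage sketch (hypothetical Author model). OrderBy is rarely
# instantiated directly; F(...).asc()/.desc() return OrderBy instances, and
# the nulls_first/nulls_last flags map onto the NULLS FIRST/LAST handling in
# as_sql() above (emulated with an extra IS [NOT] NULL ordering term on
# backends without native support):
#
#     from django.db.models import F
#
#     Author.objects.order_by(F("last_login").desc(nulls_last=True))
#     # ... ORDER BY "last_login" DESC NULLS LAST  (e.g. on PostgreSQL)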
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], ()
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params += tuple(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params += tuple(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params += tuple(frame_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
(*params, *window_params),
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
group_by_cols = []
if self.partition_by:
group_by_cols.extend(self.partition_by.get_group_by_cols())
if self.order_by is not None:
group_by_cols.extend(self.order_by.get_group_by_cols())
return group_by_cols
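# Illustrative usage sketch (hypothetical Employee model). Window wraps a
# window-compatible expression and renders the OVER clause assembled in
# as_sql() above:
#
#     from django.db.models import F, Window
#     from django.db.models.functions import Rank
#
#     Employee.objects.annotate(
#         rank=Window(
#             expression=Rank(),
#             partition_by=F("department"),
#             order_by=F("salary").desc(),
#         )
#     )
#     # ... RANK() OVER (PARTITION BY "department" ORDER BY "salary" DESC)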
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. The two types of frame
    clause (ROWS and RANGE) are implemented as subclasses, but all processing
    and validation (by no means intended to be complete) is done here. Thus,
    providing an end for a frame is optional (the default is UNBOUNDED
    FOLLOWING, which is the last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
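# Illustrative frame sketch (hypothetical Sale model). RowRange/ValueRange
# plug into Window(frame=...); a start/end of None means UNBOUNDED
# PRECEDING/FOLLOWING and 0 means CURRENT ROW, per WindowFrame.__str__():
#
#     from django.db.models import RowRange, Sum, Window
#
#     Sale.objects.annotate(
#         running_total=Window(
#             expression=Sum("amount"),
#             order_by="created",
#             frame=RowRange(start=None, end=0),
#         )
#     )
#     # ... ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW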
|
c9140f75a8cd1ff73c5444e23a15a2688fe21b23ef56c2ef0e25705628067c36 | import warnings
from django.db.models.lookups import (
Exact,
GreaterThan,
GreaterThanOrEqual,
In,
IsNull,
LessThan,
LessThanOrEqual,
)
from django.utils.deprecation import RemovedInDjango50Warning
class MultiColSource:
contains_aggregate = False
contains_over_clause = False
def __init__(self, alias, targets, sources, field):
self.targets, self.sources, self.field, self.alias = (
targets,
sources,
field,
alias,
)
self.output_field = self.field
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.alias, self.field)
def relabeled_clone(self, relabels):
return self.__class__(
relabels.get(self.alias, self.alias), self.targets, self.sources, self.field
)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def resolve_expression(self, *args, **kwargs):
return self
def get_normalized_value(value, lhs):
from django.db.models import Model
if isinstance(value, Model):
if value.pk is None:
# When the deprecation ends, replace with:
# raise ValueError(
# "Model instances passed to related filters must be saved."
# )
warnings.warn(
"Passing unsaved model instances to related filters is deprecated.",
RemovedInDjango50Warning,
)
value_list = []
sources = lhs.output_field.path_infos[-1].target_fields
for source in sources:
while not isinstance(value, source.model) and source.remote_field:
source = source.remote_field.model._meta.get_field(
source.remote_field.field_name
)
try:
value_list.append(getattr(value, source.attname))
except AttributeError:
# A case like Restaurant.objects.filter(place=restaurant_instance),
# where place is a OneToOneField and the primary key of Restaurant.
return (value.pk,)
return tuple(value_list)
if not isinstance(value, tuple):
return (value,)
return value
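# Illustrative sketch of the normalization above: for a saved instance r of a
# hypothetical Restaurant model with pk=42, get_normalized_value(r, lhs)
# walks the relation's target fields and yields (42,); a plain value such as
# 42 is wrapped as (42,), and a tuple is returned unchanged. The lookups
# below can therefore treat instances and raw keys uniformly.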
class RelatedIn(In):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource):
if self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_value(). Consider
# case ForeignKey to IntegerField given value 'abc'. The
# ForeignKey itself doesn't have validation for non-integers,
# so we must run validation using the target field.
if hasattr(self.lhs.output_field, "path_infos"):
# Run the target field's get_prep_value. We can safely
# assume there is only one as we don't get to the direct
# value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[
-1
]
self.rhs = [target_field.get_prep_value(v) for v in self.rhs]
elif not getattr(self.rhs, "has_select_fields", True) and not getattr(
self.lhs.field.target_field, "primary_key", False
):
self.rhs.clear_select_clause()
if (
getattr(self.lhs.output_field, "primary_key", False)
and self.lhs.output_field.model == self.rhs.model
):
# A case like
# Restaurant.objects.filter(place__in=restaurant_qs), where
# place is a OneToOneField and the primary key of
# Restaurant.
target_field = self.lhs.field.name
else:
target_field = self.lhs.field.target_field.name
self.rhs.add_fields([target_field], True)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
# For multicolumn lookups we need to build a multicolumn where clause.
# This clause is either a SubqueryConstraint (for values that need
# to be compiled to SQL) or an OR-combined list of
# (col1 = val1 AND col2 = val2 AND ...) clauses.
from django.db.models.sql.where import (
AND,
OR,
SubqueryConstraint,
WhereNode,
)
root_constraint = WhereNode(connector=OR)
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
for value in values:
value_constraint = WhereNode()
for source, target, val in zip(
self.lhs.sources, self.lhs.targets, value
):
lookup_class = target.get_lookup("exact")
lookup = lookup_class(
target.get_col(self.lhs.alias, source), val
)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
root_constraint.add(
SubqueryConstraint(
self.lhs.alias,
[target.column for target in self.lhs.targets],
[source.name for source in self.lhs.sources],
self.rhs,
),
AND,
)
return root_constraint.as_sql(compiler, connection)
return super().as_sql(compiler, connection)
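# Illustrative SQL shape for the multicolumn branch above: filtering a
# two-column relation against two direct values compiles to an OR of ANDed
# per-column exact lookups, roughly
#     WHERE (col1 = %s AND col2 = %s) OR (col1 = %s AND col2 = %s)
# whereas a queryset right-hand side is wrapped in a SubqueryConstraint
# instead.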
class RelatedLookupMixin:
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and not hasattr(
self.rhs, "resolve_expression"
):
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_value(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[-1]
self.rhs = target_field.get_prep_value(self.rhs)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
assert self.rhs_is_direct_value()
self.rhs = get_normalized_value(self.rhs, self.lhs)
from django.db.models.sql.where import AND, WhereNode
root_constraint = WhereNode()
for target, source, val in zip(
self.lhs.targets, self.lhs.sources, self.rhs
):
lookup_class = target.get_lookup(self.lookup_name)
root_constraint.add(
lookup_class(target.get_col(self.lhs.alias, source), val), AND
)
return root_constraint.as_sql(compiler, connection)
return super().as_sql(compiler, connection)
class RelatedExact(RelatedLookupMixin, Exact):
pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
pass
class RelatedIsNull(RelatedLookupMixin, IsNull):
pass
|
5a1e2b411f7545d3253a8d807e577cc1a0bf02a60852ca590a664a42655987ff | import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
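    # Illustrative behavior: for 'LOWER("name") ASC', ordering_parts captures
    # 'LOWER("name")' in its first group, i.e. the expression with its
    # ASC/DESC suffix stripped. get_order_by() and get_extra_select() use
    # this to de-duplicate ORDER BY terms regardless of direction.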
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
        # In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can never be restricted to a smaller set, but additional columns in
        # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
        # the end result is that it is impossible to force the query to have
        # a chosen GROUP BY clause - you can almost do this by using the form:
        # .values(*wanted_cols).annotate(AnAggregate())
        # but any later annotations, extra selects, or values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
        # filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
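    # Illustrative effect of the optimization above: on a backend with
    # allows_group_by_pk (e.g. MySQL), GROUP BY "author"."id", "author"."name"
    # collapses to GROUP BY "author"."id", since the remaining columns are
    # functionally dependent on the primary key.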
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
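    # Illustrative return shape for a hypothetical
    # SomeModel.objects.annotate(n=Count("x")): the list starts with
    # (Col, (sql, params), None) triples for the model's concrete fields,
    # followed by (Count(...), (sql, params), "n"); annotations maps "n" to
    # its column position, and klass_info records the model together with
    # the select indexes of its fields.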
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
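    # Illustrative behavior: a real table name such as "auth_user" comes back
    # quoted by the backend, while a join alias like "T3" or an extra-select
    # name is returned untouched, so the generated SQL never quotes names the
    # database doesn't know as actual tables or columns.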
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
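    # Illustrative dispatch: on PostgreSQL, a node defining
    # as_postgresql(compiler, connection) is compiled through that method
    # instead of as_sql(); the vendor hooks used in this module, such as
    # as_oracle() and as_sqlite(), are resolved the same way.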
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
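    # Illustrative output for qs1.union(qs2) on a backend that supports
    # slicing/ordering in compound statements:
    #     (['(SELECT ...) UNION (SELECT ...)'], [params...])
    # With all=True the operator becomes UNION ALL, and empty querysets are
    # simply omitted from a UNION, as handled above.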
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
qual_aliases = set()
replacements = {}
expressions = list(self.qualify.leaves())
while expressions:
expr = expressions.pop()
if select_alias := (select.get(expr) or replacements.get(expr)):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
return result, list(inner_params) + qualify_params
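    # Illustrative shape of the emulation above: window-function predicates
    # (which SQL forbids in WHERE) are evaluated inside a subquery and
    # filtered outside it, roughly
    #     SELECT * FROM (SELECT ..., RANK() OVER (...) AS "qual0" ...)
    #     "qualify" WHERE "qual0" = %s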
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases,
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (Col instances) resolved against
        the correct table aliases, suitable for compilation into the SELECT
        clause.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_ordering() and get_distinct() must produce same target columns on
same input, as the prefixes of get_ordering() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.deferred_to_columns()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(
f, restricted, requested, only_load.get(field_model)
):
continue
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(
f, restricted, requested, only_load.get(model), reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1, next, restricted
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
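    # Illustrative summary (hypothetical compiler instance `c`) of how
    # result_type shapes the return value of execute_sql():
    #
    #     c.execute_sql(MULTI)      -> iterator over chunks of rows
    #     c.execute_sql(SINGLE)     -> one row (or None), cursor closed
    #     c.execute_sql(CURSOR)     -> the open cursor; caller must close it
    #     c.execute_sql(NO_RESULTS) -> None, cursor closed immediately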
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
        # Some backends return one-item tuples with strings, and others return
        # tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
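    # Illustrative examples (hypothetical values) of the branches above:
    #
    #     field=None, val="NOW()"          -> ("NOW()", [])  raw SQL value
    #     val with an as_sql() method      -> compiled SQL and its params
    #     field with get_placeholder()     -> custom placeholder, [val]
    #     ordinary field, val=42           -> ("%s", [42])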
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
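    # Worked example (hypothetical data): for two objects and two plain
    # fields, assemble_as_sql(fields, [["a", 1], ["b", 2]]) returns roughly
    #
    #     placeholder_rows = (("%s", "%s"), ("%s", "%s"))
    #     param_rows = [["a", 1], ["b", 2]]
    #
    # i.e. one row of placeholders and one flattened param row per object.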
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
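    # Illustrative sketch (hypothetical table names): with a single alias the
    # fast path emits
    #     DELETE FROM "app_book" WHERE "app_book"."name" = %s
    # while the multi-table/self-referencing path filters on pks instead:
    #     DELETE FROM "app_book" WHERE "app_book"."id" IN (SELECT ...)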
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
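    # Illustrative sketch: when must_pre_select is True (e.g. MySQL, which
    # can't select from the table being updated), the ids are materialized
    # first and the final statement becomes roughly
    #     UPDATE "app_book" SET ... WHERE "app_book"."id" IN (1, 2, 3)
    # instead of filtering through a correlated subquery.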
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
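# Illustrative usage sketch (not part of this module's API): cursor_iter()
# is driven until fetchmany() returns the backend's sentinel (usually an
# empty list), and the finally clause closes the cursor even on early exit:
#
#     for chunk in cursor_iter(cursor, sentinel=[], col_count=None, itersize=100):
#         for row in chunk:
#             process(row)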
|
b5242cdc0942e9a4ca1820492c35b17d8cbb7b10dfb71825d1fbc47126328f2a | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import operator
from functools import reduce
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, When
from django.db.models.lookups import Exact
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
class WhereNode(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
    However, a child could also be any class with an as_sql() method and
    either a relabeled_clone() method or relabel_aliases() and clone()
    methods, and a contains_aggregate attribute.
"""
default = AND
resolved = False
conditional = True
def split_having_qualify(self, negated=False, must_group_by=False):
"""
Return three possibly None nodes: one for those parts of self that
should be included in the WHERE clause, one for those parts of self
that must be included in the HAVING clause, and one for those parts
that refer to window functions.
"""
if not self.contains_aggregate and not self.contains_over_clause:
return self, None, None
in_negated = negated ^ self.negated
# Whether or not children must be connected in the same filtering
        # clause (WHERE > HAVING > QUALIFY) to maintain the logical semantics.
must_remain_connected = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if (
must_remain_connected
and self.contains_aggregate
and not self.contains_over_clause
):
            # It's much cheaper to short-circuit and stash everything in the
            # HAVING clause than to split children when possible.
return None, self, None
where_parts = []
having_parts = []
qualify_parts = []
for c in self.children:
if hasattr(c, "split_having_qualify"):
where_part, having_part, qualify_part = c.split_having_qualify(
in_negated, must_group_by
)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
if qualify_part is not None:
qualify_parts.append(qualify_part)
elif c.contains_over_clause:
qualify_parts.append(c)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
if must_remain_connected and qualify_parts:
            # Disjunctive heterogeneous predicates can be pushed down to the
            # QUALIFY clause as long as no conditional aggregation is involved.
if not where_parts or (where_parts and not must_group_by):
return None, None, self
elif where_parts:
                # In theory this should only be enforced when dealing with
                # where_parts containing predicates against multi-valued
                # relationships that could affect aggregation results, but
                # this is complex to infer properly.
raise NotImplementedError(
"Heterogeneous disjunctive predicates against window functions are "
"not implemented when performing conditional aggregation."
)
where_node = (
self.create(where_parts, self.connector, self.negated)
if where_parts
else None
)
having_node = (
self.create(having_parts, self.connector, self.negated)
if having_parts
else None
)
qualify_node = (
self.create(qualify_parts, self.connector, self.negated)
if qualify_parts
else None
)
return where_node, having_node, qualify_node
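    # Illustrative sketch (hypothetical queryset): filtering on
    # Q(pages__gt=100) combined by AND with a lookup against an aggregate
    # annotation splits into a WHERE node for the plain lookup and a HAVING
    # node for the aggregate comparison; a predicate built on a Window
    # expression would land in the third, QUALIFY, slot instead.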
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
        substituted in. Return ('', []) if this node matches everything,
        (None, []) if this node is empty, and raise EmptyResultSet if this
        node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND (a + b + c + ...) == 1
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
            # Check if this node matches nothing or everything, using the
            # remaining full/empty counts decremented above.
if empty_needed == 0:
if self.negated:
return "", []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return "", []
conn = " %s " % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
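    # Worked example of the XOR fallback above (hypothetical conditions
    # a, b): on a backend without logical XOR, a XOR b compiles to roughly
    #
    #     (a OR b) AND ((CASE WHEN a THEN 1 ELSE 0 END
    #                    + CASE WHEN b THEN 1 ELSE 0 END) = 1)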
def get_group_by_cols(self, alias=None):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, "clone"):
child = child.clone()
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
clone.children.append(child.replace_expressions(replacements))
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
@property
def _output_field_or_none(self):
return self.output_field
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
        # (e.g. Oracle) doesn't support boolean expressions in the SELECT or
        # GROUP BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def leaves(self):
for child in self.children:
if isinstance(child, WhereNode):
yield from child.leaves()
else:
yield child
class NothingNode:
"""A node that matches nothing."""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere:
# The contents are a black box - assume no aggregates or windows are used.
contains_aggregate = False
contains_over_clause = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint:
# Even if aggregates or windows would be used in a subquery,
    # the outer query isn't interested in those.
contains_aggregate = False
contains_over_clause = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
query_object.clear_ordering(clear_default=True)
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
|
a1d41f7bd0bdbade336bb6d046d7c03247a93cd26214c2440fb4f3ec504c7a07 | import logging
import operator
from datetime import datetime
from django.conf import settings
from django.db.backends.ddl_references import (
Columns,
Expressions,
ForeignKeyName,
IndexName,
Statement,
Table,
)
from django.db.backends.utils import names_digest, split_identifier
from django.db.models import Deferrable, Index
from django.db.models.sql import Query
from django.db.transaction import TransactionManagementError, atomic
from django.utils import timezone
logger = logging.getLogger("django.db.backends.schema")
def _is_relevant_relation(relation, altered_field):
"""
When altering the given field, must constraints on its model from the given
relation be temporarily dropped?
"""
field = relation.field
if field.many_to_many:
# M2M reverse field
return False
if altered_field.primary_key and field.to_fields == [None]:
# Foreign key constraint on the primary key, which is being altered.
return True
# Is the constraint targeting the field being altered?
return altered_field.name in field.to_fields
def _all_related_fields(model):
# Related fields must be returned in a deterministic order.
return sorted(
model._meta._get_fields(
forward=False,
reverse=True,
include_hidden=True,
include_parents=False,
),
key=operator.attrgetter("name"),
)
def _related_non_m2m_objects(old_field, new_field):
# Filter out m2m objects from reverse relations.
# Return (old_relation, new_relation) tuples.
related_fields = zip(
(
obj
for obj in _all_related_fields(old_field.model)
if _is_relevant_relation(obj, old_field)
),
(
obj
for obj in _all_related_fields(new_field.model)
if _is_relevant_relation(obj, new_field)
),
)
for old_rel, new_rel in related_fields:
yield old_rel, new_rel
yield from _related_non_m2m_objects(
old_rel.remote_field,
new_rel.remote_field,
)
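# Illustrative sketch (hypothetical models): if Book.author is a ForeignKey
# to Author and Author's primary key is being altered, the generator yields
# the (old, new) Book.author relation pair, then recurses through the pair's
# remote fields so chains of references (a key that is itself referenced by
# further foreign keys) are visited as well.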
class BaseDatabaseSchemaEditor:
"""
This class and its subclasses are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_alter_column_no_default_null = sql_alter_column_no_default
sql_alter_column_collate = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = (
"ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
)
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
)
sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s"
sql_check_constraint = "CHECK (%(check)s)"
sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_constraint = "CONSTRAINT %(name)s %(constraint)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = sql_delete_constraint
sql_create_unique = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s "
"UNIQUE (%(columns)s)%(deferrable)s"
)
sql_delete_unique = sql_delete_constraint
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s"
)
sql_create_inline_fk = None
sql_create_column_inline_fk = None
sql_delete_fk = sql_delete_constraint
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_unique_index = (
"CREATE UNIQUE INDEX %(name)s ON %(table)s "
"(%(columns)s)%(include)s%(condition)s"
)
sql_rename_index = "ALTER INDEX %(old_name)s RENAME TO %(new_name)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
)
sql_delete_pk = sql_delete_constraint
sql_delete_procedure = "DROP PROCEDURE %(procedure)s"
def __init__(self, connection, collect_sql=False, atomic=True):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
self.atomic_migration = self.connection.features.can_rollback_ddl and atomic
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.atomic_migration:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.atomic_migration:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=()):
"""Execute the given SQL statement, with optional parameters."""
# Don't perform the transactional DDL check if SQL is being collected
# as it's not going to be executed anyway.
if (
not self.collect_sql
and self.connection.in_atomic_block
and not self.connection.features.can_rollback_ddl
):
raise TransactionManagementError(
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
# Account for non-string statement objects.
sql = str(sql)
# Log the command we're running, then run it
logger.debug(
"%s; (params %r)", sql, params, extra={"params": params, "sql": sql}
)
if self.collect_sql:
ending = "" if sql.rstrip().endswith(";") else ";"
if params is not None:
self.collected_sql.append(
(sql % tuple(map(self.quote_value, params))) + ending
)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
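    # Illustrative sketch: with collect_sql=True the statements are recorded
    # instead of run, which is how `sqlmigrate` renders a migration (table
    # and column names here are hypothetical):
    #
    #     with connection.schema_editor(collect_sql=True) as editor:
    #         editor.execute("ALTER TABLE foo ADD COLUMN bar integer")
    #     editor.collected_sql  # ['ALTER TABLE foo ADD COLUMN bar integer;']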
def quote_name(self, name):
return self.connection.ops.quote_name(name)
def table_sql(self, model):
"""Take a model and return its table definition."""
# Add any unique_togethers (always deferred, as some fields might be
# created afterward, like geometry fields with some backends).
for field_names in model._meta.unique_together:
fields = [model._meta.get_field(field) for field in field_names]
self.deferred_sql.append(self._create_unique_sql(model, fields))
# Create column SQL, add FK deferreds if needed.
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL.
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here.
db_params = field.db_parameters(connection=self.connection)
if db_params["check"]:
definition += " " + self.sql_check_constraint % db_params
# Autoincrement SQL (for backends with inline variant).
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK.
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column
if self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
elif self.connection.features.supports_foreign_keys:
self.deferred_sql.append(
self._create_fk_sql(
model, field, "_fk_%(to_table)s_%(to_column)s"
)
)
# Add the SQL to our big list.
column_sqls.append(
"%s %s"
% (
self.quote_name(field.column),
definition,
)
)
# Autoincrement SQL (for backends with post table definition
# variant).
if field.get_internal_type() in (
"AutoField",
"BigAutoField",
"SmallAutoField",
):
autoinc_sql = self.connection.ops.autoinc_sql(
model._meta.db_table, field.column
)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
constraints = [
constraint.constraint_sql(model, self)
for constraint in model._meta.constraints
]
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(
constraint for constraint in (*column_sqls, *constraints) if constraint
),
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
model._meta.db_tablespace
)
if tablespace_sql:
sql += " " + tablespace_sql
return sql, params
# Field <-> database mapping functions
def _iter_column_sql(
self, column_db_type, params, model, field, field_db_params, include_default
):
yield column_db_type
if collation := field_db_params.get("collation"):
yield self._collate_sql(collation)
# Work out nullability.
null = field.null
# Include a default value, if requested.
include_default = (
include_default
and not self.skip_default(field)
and
# Don't include a default value if it's a nullable field and the
# default cannot be dropped in the ALTER COLUMN statement (e.g.
# MySQL longtext and longblob).
not (null and self.skip_default_on_alter(field))
)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
column_default = "DEFAULT " + self._column_default_sql(field)
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (Oracle).
# If this is the case, the individual schema backend should
# implement prepare_default().
yield column_default % self.prepare_default(default_value)
else:
yield column_default
params.append(default_value)
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (
field.empty_strings_allowed
and not field.primary_key
and self.connection.features.interprets_empty_strings_as_nulls
):
null = True
if not null:
yield "NOT NULL"
elif not self.connection.features.implied_column_null:
yield "NULL"
if field.primary_key:
yield "PRIMARY KEY"
elif field.unique:
yield "UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column.
tablespace = field.db_tablespace or model._meta.db_tablespace
if (
tablespace
and self.connection.features.supports_tablespaces
and field.unique
):
yield self.connection.ops.tablespace_sql(tablespace, inline=True)
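    # Illustrative sketch (hypothetical field): when a default is requested
    # (include_default=True), a non-null CharField with a default yields
    # fragments that join into roughly
    #     varchar(100) DEFAULT %s NOT NULL
    # with the default value appended to `params` as a query parameter.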
def column_sql(self, model, field, include_default=False):
"""
Return the column definition for a field. The field must already have
had set_attributes_from_name() called.
"""
# Get the column's type and use that as the basis of the SQL.
field_db_params = field.db_parameters(connection=self.connection)
column_db_type = field_db_params["type"]
# Check for fields that aren't actually columns (e.g. M2M).
if column_db_type is None:
return None, None
params = []
return (
" ".join(
# This appends to the params being returned.
self._iter_column_sql(
column_db_type,
params,
model,
field,
field_db_params,
include_default,
)
),
params,
)
def skip_default(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob).
"""
return False
def skip_default_on_alter(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob) in the ALTER COLUMN statement.
"""
return False
def prepare_default(self, value):
"""
        Only used for backends which have the requires_literal_defaults feature.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseSchemaEditor for backends which have "
"requires_literal_defaults must provide a prepare_default() method"
)
def _column_default_sql(self, field):
"""
Return the SQL to use in a DEFAULT clause. The resulting string should
contain a '%s' placeholder for a default value.
"""
return "%s"
@staticmethod
def _effective_default(field):
# This method allows testing its logic without a connection.
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = b""
else:
default = ""
elif getattr(field, "auto_now", False) or getattr(field, "auto_now_add", False):
internal_type = field.get_internal_type()
if internal_type == "DateTimeField":
default = timezone.now()
else:
default = datetime.now()
if internal_type == "DateField":
default = default.date()
elif internal_type == "TimeField":
default = default.time()
else:
default = None
return default
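    # Illustrative examples (hypothetical fields) of _effective_default():
    #
    #     CharField(blank=True)             -> ""  (empty-string sentinel)
    #     BinaryField(blank=True)           -> b""
    #     DateTimeField(auto_now_add=True)  -> timezone.now() at DDL time
    #     IntegerField(null=True)           -> None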
def effective_default(self, field):
"""Return a field's effective database default value."""
return field.get_db_prep_save(self._effective_default(field), self.connection)
def quote_value(self, value):
"""
Return a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Create a table and any accompanying indexes or unique constraints for
the given `model`.
"""
sql, params = self.table_sql(model)
        # Prevent using [] as params, in case a literal '%' is used in the
        # definition.
self.execute(sql, params or None)
        # Add any field indexes and index_together entries (deferred, as
        # SQLite's _remake_table needs them).
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""Delete a model from the database."""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(
self.sql_delete_table
% {
"table": self.quote_name(model._meta.db_table),
}
)
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(
model._meta.db_table
):
self.deferred_sql.remove(sql)
def add_index(self, model, index):
"""Add an index on a model."""
if (
index.contains_expressions
and not self.connection.features.supports_expression_indexes
):
return None
# Index.create_sql returns interpolated SQL which makes params=None a
# necessity to avoid escaping attempts on execution.
self.execute(index.create_sql(model, self), params=None)
def remove_index(self, model, index):
"""Remove an index from a model."""
if (
index.contains_expressions
and not self.connection.features.supports_expression_indexes
):
return None
self.execute(index.remove_sql(model, self))
def rename_index(self, model, old_index, new_index):
if self.connection.features.can_rename_index:
self.execute(
self._rename_index_sql(model, old_index.name, new_index.name),
params=None,
)
else:
self.remove_index(model, old_index)
self.add_index(model, new_index)
def add_constraint(self, model, constraint):
"""Add a constraint to a model."""
sql = constraint.create_sql(model, self)
if sql:
# Constraint.create_sql returns interpolated SQL which makes
# params=None a necessity to avoid escaping attempts on execution.
self.execute(sql, params=None)
def remove_constraint(self, model, constraint):
"""Remove a constraint from a model."""
sql = constraint.remove_sql(model, self)
if sql:
self.execute(sql)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deal with a model changing its unique_together. The input
unique_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_unique_together}
news = {tuple(fields) for fields in new_unique_together}
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(
model,
fields,
{"unique": True, "primary_key": False},
self.sql_delete_unique,
)
# Created uniques
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_unique_sql(model, fields))
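    # Illustrative sketch (hypothetical change): going from
    #     unique_together = [("a", "b")]  to  [("a", "c")]
    # drops the ("a", "b") unique constraint and creates one on ("a", "c");
    # tuples present in both sets are left untouched by the set differences.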
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deal with a model changing its index_together. The input
index_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_index_together}
news = {tuple(fields) for fields in new_index_together}
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(
model,
fields,
{"index": True, "unique": False},
self.sql_delete_index,
)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields=fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
meta_index_names = {constraint.name for constraint in model._meta.indexes}
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(
model,
columns,
exclude=meta_constraint_names | meta_index_names,
**constraint_kwargs,
)
if (
constraint_kwargs.get("unique") is True
and constraint_names
and self.connection.features.allows_multiple_constraints_on_same_fields
):
# Constraint matching the unique_together name.
default_name = str(
self._unique_constraint_name(model._meta.db_table, columns, quote=False)
)
if default_name in constraint_names:
constraint_names = [default_name]
if len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of constraints for %s(%s)"
% (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
)
)
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""Rename the table a model points to."""
if old_db_table == new_db_table or (
self.connection.features.ignores_table_name_case
and old_db_table.lower() == new_db_table.lower()
):
return
self.execute(
self.sql_rename_table
% {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
}
)
# Rename all references to the old table name.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_table_references(old_db_table, new_db_table)
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""Move a model's table between tablespaces."""
self.execute(
self.sql_retablespace_table
% {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
}
)
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
if col_type_suffix := field.db_type_suffix(connection=self.connection):
definition += f" {col_type_suffix}"
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params["check"]:
definition += " " + self.sql_check_constraint % db_params
if (
field.remote_field
and self.connection.features.supports_foreign_keys
and field.db_constraint
):
constraint_suffix = "_fk_%(to_table)s_%(to_column)s"
# Add FK constraint inline, if supported.
if self.sql_create_column_inline_fk:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column
namespace, _ = split_identifier(model._meta.db_table)
definition += " " + self.sql_create_column_inline_fk % {
"name": self._fk_constraint_name(model, field, constraint_suffix),
"namespace": "%s." % self.quote_name(namespace)
if namespace
else "",
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
"deferrable": self.connection.ops.deferrable_sql(),
}
# Otherwise, add FK constraints later.
else:
self.deferred_sql.append(
self._create_fk_sql(model, field, constraint_suffix)
)
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if (
not self.skip_default_on_alter(field)
and self.effective_default(field) is not None
):
changes_sql, params = self._alter_column_default_sql(
model, None, field, drop=True
)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Add an index, if required
self.deferred_sql.extend(self._field_indexes_sql(model, field))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)["type"] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_fk_sql(model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
# Remove all deferred statements referencing the deleted column.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_column(
model._meta.db_table, field.column
):
self.deferred_sql.remove(sql)
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allow a field's type, uniqueness, nullability, default, column,
constraints, etc. to be modified.
`old_field` is required to compute the necessary changes.
If `strict` is True, raise errors if the old column does not match
`old_field` precisely.
"""
if not self._field_should_be_altered(old_field, new_field):
return
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params["type"]
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params["type"]
if (old_type is None and old_field.remote_field is None) or (
new_type is None and new_field.remote_field is None
):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)"
% (old_field, new_field),
)
elif (
old_type is None
and new_type is None
and (
old_field.remote_field.through
and new_field.remote_field.through
and old_field.remote_field.through._meta.auto_created
and new_field.remote_field.through._meta.auto_created
)
):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif (
old_type is None
and new_type is None
and (
old_field.remote_field.through
and new_field.remote_field.through
and not old_field.remote_field.through._meta.auto_created
and not new_field.remote_field.through._meta.auto_created
)
):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
"""Perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if (
self.connection.features.supports_foreign_keys
and old_field.remote_field
and old_field.db_constraint
):
fk_names = self._constraint_names(
model, [old_field.column], foreign_key=True
)
if strict and len(fk_names) != 1:
raise ValueError(
"Found wrong number (%s) of foreign key constraints for %s.%s"
% (
len(fk_names),
model._meta.db_table,
old_field.column,
)
)
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_fk_sql(model, fk_name))
# Has unique been removed?
if old_field.unique and (
not new_field.unique or self._field_became_primary_key(old_field, new_field)
):
# Find the unique constraint for this field
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
constraint_names = self._constraint_names(
model,
[old_field.column],
unique=True,
primary_key=False,
exclude=meta_constraint_names,
)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of unique constraints for %s.%s"
% (
len(constraint_names),
model._meta.db_table,
old_field.column,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_unique_sql(model, constraint_name))
# Drop incoming FK constraints if the field is a primary key or unique,
# which might be a to_field target, and things are going to change.
old_collation = old_db_params.get("collation")
new_collation = new_db_params.get("collation")
drop_foreign_keys = (
self.connection.features.supports_foreign_keys
and (
(old_field.primary_key and new_field.primary_key)
or (old_field.unique and new_field.unique)
)
and ((old_type != new_type) or (old_collation != new_collation))
)
if drop_foreign_keys:
            # '_meta.related_field' also contains M2M reverse fields; these
            # are filtered out by _related_non_m2m_objects().
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_fk_sql(new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
# Remove indexes if db_index switched to False or a unique constraint
# will now be used in lieu of an index. The following lines from the
# truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# True | False | False | False
# True | False | False | True
# True | False | True | True
if (
old_field.db_index
and not old_field.unique
and (not new_field.db_index or new_field.unique)
):
# Find the index for this field
meta_index_names = {index.name for index in model._meta.indexes}
# Retrieve only BTREE indexes since this is what's created with
# db_index=True.
index_names = self._constraint_names(
model,
[old_field.column],
index=True,
type_=Index.suffix,
exclude=meta_index_names,
)
for index_name in index_names:
# The only way to check if an index was created with
# db_index=True or with Index(['field'], name='foo')
# is to look at its name (refs #28053).
self.execute(self._delete_index_sql(model, index_name))
# Change check constraints?
if old_db_params["check"] != new_db_params["check"] and old_db_params["check"]:
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
constraint_names = self._constraint_names(
model,
[old_field.column],
check=True,
exclude=meta_constraint_names,
)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of check constraints for %s.%s"
% (
len(constraint_names),
model._meta.db_table,
old_field.column,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_check_sql(model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(
self._rename_field_sql(
model._meta.db_table, old_field, new_field, new_type
)
)
# Rename all references to the renamed column.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_column_references(
model._meta.db_table, old_field.column, new_field.column
)
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type suffix change? (e.g. auto increment).
old_type_suffix = old_field.db_type_suffix(connection=self.connection)
new_type_suffix = new_field.db_type_suffix(connection=self.connection)
# Collation change?
if old_collation != new_collation:
# Collation change handles also a type change.
fragment = self._alter_column_collation_sql(
model, new_field, new_type, new_collation, old_field
)
actions.append(fragment)
# Type change?
elif (old_type, old_type_suffix) != (new_type, new_type_suffix):
fragment, other_actions = self._alter_column_type_sql(
model, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
needs_database_default = False
if old_field.null and not new_field.null:
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
if (
not self.skip_default_on_alter(new_field)
and old_default != new_default
and new_default is not None
):
needs_database_default = True
actions.append(
self._alter_column_default_sql(model, old_field, new_field)
)
# Nullability change?
if old_field.null != new_field.null:
fragment = self._alter_column_null_sql(model, old_field, new_field)
if fragment:
null_actions.append(fragment)
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = new_field.has_default() and (
old_field.null and not new_field.null
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default
% {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
                # Since we didn't run a NOT NULL change before, we need to do
                # it now.
for sql, params in null_actions:
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# If primary_key changed to False, delete the primary key constraint.
if old_field.primary_key and not new_field.primary_key:
self._delete_primary_key(model, strict)
# Added a unique?
if self._unique_should_be_added(old_field, new_field):
self.execute(self._create_unique_sql(model, [new_field]))
# Added an index? Add an index if db_index switched to True or a unique
# constraint will no longer be used in lieu of an index. The following
# lines from the truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# False | False | True | False
# False | True | True | False
# True | True | True | False
if (
(not old_field.db_index or old_field.unique)
and new_field.db_index
and not new_field.unique
):
self.execute(self._create_index_sql(model, fields=[new_field]))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if drop_foreign_keys:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
if self._field_became_primary_key(old_field, new_field):
# Make the new one
self.execute(self._create_primary_key_sql(model, new_field))
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params["type"]
rel_collation = rel_db_params.get("collation")
old_rel_db_params = old_rel.field.db_parameters(connection=self.connection)
old_rel_collation = old_rel_db_params.get("collation")
if old_rel_collation != rel_collation:
                # A collation change also handles a type change.
fragment = self._alter_column_collation_sql(
new_rel.related_model,
new_rel.field,
rel_type,
rel_collation,
old_rel.field,
)
other_actions = []
else:
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (
self.connection.features.supports_foreign_keys
and new_field.remote_field
and (
fks_dropped or not old_field.remote_field or not old_field.db_constraint
)
and new_field.db_constraint
):
self.execute(
self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")
)
# Rebuild FKs that pointed to us if we previously had to drop them
if drop_foreign_keys:
for _, rel in rels_to_update:
if rel.field.db_constraint:
self.execute(
self._create_fk_sql(rel.related_model, rel.field, "_fk")
)
# Does it have check constraints we need to add?
if old_db_params["check"] != new_db_params["check"] and new_db_params["check"]:
constraint_name = self._create_index_name(
model._meta.db_table, [new_field.column], suffix="_check"
)
self.execute(
self._create_check_sql(model, constraint_name, new_db_params["check"])
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
changes_sql, params = self._alter_column_default_sql(
model, old_field, new_field, drop=True
)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
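    # Editor's sketch (not part of Django): on a PostgreSQL-style backend,
    # the 4-way default alteration above renders roughly this SQL sequence
    # for a nullable "age" column becoming NOT NULL with default 0
    # (table/column names assumed):
    #   ALTER TABLE "app_model" ALTER COLUMN "age" SET DEFAULT %s  -- [0]
    #   UPDATE "app_model" SET "age" = %s WHERE "age" IS NULL      -- [0]
    #   ALTER TABLE "app_model" ALTER COLUMN "age" SET NOT NULL
    #   ALTER TABLE "app_model" ALTER COLUMN "age" DROP DEFAULT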
def _alter_column_null_sql(self, model, old_field, new_field):
"""
Hook to specialize column null alteration.
Return a (sql, params) fragment to set a column to null or non-null
as required by new_field, or None if no changes are required.
"""
if (
self.connection.features.interprets_empty_strings_as_nulls
and new_field.empty_strings_allowed
):
# The field is nullable in the database anyway, leave it alone.
return
else:
new_db_params = new_field.db_parameters(connection=self.connection)
sql = (
self.sql_alter_column_null
if new_field.null
else self.sql_alter_column_not_null
)
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_db_params["type"],
},
[],
)
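    # For illustration (a sketch; the exact template is backend-defined),
    # making "age" nullable typically yields the fragment
    #   ('ALTER COLUMN "age" DROP NOT NULL', [])
    # while the reverse yields ('ALTER COLUMN "age" SET NOT NULL', []).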
def _alter_column_default_sql(self, model, old_field, new_field, drop=False):
"""
Hook to specialize column default alteration.
Return a (sql, params) fragment to add or drop (depending on the drop
argument) a default to new_field's column.
"""
new_default = self.effective_default(new_field)
default = self._column_default_sql(new_field)
params = [new_default]
if drop:
params = []
elif self.connection.features.requires_literal_defaults:
# Some databases (Oracle) can't take defaults as a parameter
# If this is the case, the SchemaEditor for that database should
# implement prepare_default().
default = self.prepare_default(new_default)
params = []
new_db_params = new_field.db_parameters(connection=self.connection)
if drop:
if new_field.null:
sql = self.sql_alter_column_no_default_null
else:
sql = self.sql_alter_column_no_default
else:
sql = self.sql_alter_column_default
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_db_params["type"],
"default": default,
},
params,
)
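    # For illustration (a sketch; templates vary per backend), adding a
    # default of 0 to "age" typically yields
    #   ('ALTER COLUMN "age" SET DEFAULT %s', [0])
    # and drop=True yields ('ALTER COLUMN "age" DROP DEFAULT', []).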
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Return a two-tuple of: an SQL fragment of (sql, params) to insert into
an ALTER TABLE statement and a list of extra (sql, params) tuples to
run once the field is altered.
"""
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
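    # For illustration (a sketch), altering "age" to bigint yields
    #   (('ALTER COLUMN "age" TYPE bigint', []), [])
    # with the second element left empty for backends whose overrides must
    # run extra statements after the ALTER (e.g. identity handling).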
def _alter_column_collation_sql(
self, model, new_field, new_type, new_collation, old_field
):
return (
self.sql_alter_column_collate
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": " " + self._collate_sql(new_collation)
if new_collation
else "",
},
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
# Rename the through table
if (
old_field.remote_field.through._meta.db_table
!= new_field.remote_field.through._meta.db_table
):
self.alter_db_table(
old_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# The field that points to the target model is needed, so we can
# tell alter_field to change it - this is m2m_reverse_field_name()
# (as opposed to m2m_field_name(), which points to our model).
old_field.remote_field.through._meta.get_field(
old_field.m2m_reverse_field_name()
),
new_field.remote_field.through._meta.get_field(
new_field.m2m_reverse_field_name()
),
)
self.alter_field(
new_field.remote_field.through,
            # For self-referential models, we need to alter the field from
            # the other end too.
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, table_name, column_names, suffix=""):
"""
Generate a unique name for an index/unique constraint.
The name is divided into 3 parts: the table name, the column names,
and a unique digest and suffix.
"""
_, table_name = split_identifier(table_name)
hash_suffix_part = "%s%s" % (
names_digest(table_name, *column_names, length=8),
suffix,
)
max_length = self.connection.ops.max_name_length() or 200
# If everything fits into max_length, use that name.
index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part)
if len(index_name) <= max_length:
return index_name
# Shorten a long suffix.
if len(hash_suffix_part) > max_length / 3:
hash_suffix_part = hash_suffix_part[: max_length // 3]
other_length = (max_length - len(hash_suffix_part)) // 2 - 1
index_name = "%s_%s_%s" % (
table_name[:other_length],
"_".join(column_names)[:other_length],
hash_suffix_part,
)
# Prepend D if needed to prevent the name from starting with an
# underscore or a number (not permitted on Oracle).
if index_name[0] == "_" or index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
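    # Illustrative example (digest value hypothetical): for table
    # "shop_product", columns ["name", "sku"], and suffix "_idx", the
    # result looks like "shop_product_name_sku_1a2b3c4d_idx"; when that
    # exceeds max_name_length(), the table and column parts are truncated
    # evenly around the digest-plus-suffix part.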
def _get_index_tablespace_sql(self, model, fields, db_tablespace=None):
if db_tablespace is None:
if len(fields) == 1 and fields[0].db_tablespace:
db_tablespace = fields[0].db_tablespace
elif settings.DEFAULT_INDEX_TABLESPACE:
db_tablespace = settings.DEFAULT_INDEX_TABLESPACE
elif model._meta.db_tablespace:
db_tablespace = model._meta.db_tablespace
if db_tablespace is not None:
return " " + self.connection.ops.tablespace_sql(db_tablespace)
return ""
def _index_condition_sql(self, condition):
if condition:
return " WHERE " + condition
return ""
def _index_include_sql(self, model, columns):
if not columns or not self.connection.features.supports_covering_indexes:
return ""
return Statement(
" INCLUDE (%(columns)s)",
columns=Columns(model._meta.db_table, columns, self.quote_name),
)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
include=None,
expressions=None,
):
"""
Return the SQL statement to create the index for one or several fields
or expressions. `sql` can be specified if the syntax differs from the
standard (GIS indexes, ...).
"""
fields = fields or []
expressions = expressions or []
compiler = Query(model, alias_cols=False).get_compiler(
connection=self.connection,
)
tablespace_sql = self._get_index_tablespace_sql(
model, fields, db_tablespace=db_tablespace
)
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
table = model._meta.db_table
def create_index_name(*args, **kwargs):
nonlocal name
if name is None:
name = self._create_index_name(*args, **kwargs)
return self.quote_name(name)
return Statement(
sql_create_index,
table=Table(table, self.quote_name),
name=IndexName(table, columns, suffix, create_index_name),
using=using,
columns=(
self._index_columns(table, columns, col_suffixes, opclasses)
if columns
else Expressions(table, expressions, compiler, self.quote_value)
),
extra=tablespace_sql,
condition=self._index_condition_sql(condition),
include=self._index_include_sql(model, include),
)
def _delete_index_sql(self, model, name, sql=None):
return Statement(
sql or self.sql_delete_index,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _rename_index_sql(self, model, old_name, new_name):
return Statement(
self.sql_rename_index,
table=Table(model._meta.db_table, self.quote_name),
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name),
)
def _index_columns(self, table, columns, col_suffixes, opclasses):
return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes)
def _model_indexes_sql(self, model):
"""
Return a list of all index SQL statements (field indexes,
index_together, Meta.indexes) for the specified model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
output.extend(self._field_indexes_sql(model, field))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields=fields, suffix="_idx"))
for index in model._meta.indexes:
if (
not index.contains_expressions
or self.connection.features.supports_expression_indexes
):
output.append(index.create_sql(model, self))
return output
def _field_indexes_sql(self, model, field):
"""
Return a list of all index SQL statements for the specified field.
"""
output = []
if self._field_should_be_indexed(model, field):
output.append(self._create_index_sql(model, fields=[field]))
return output
def _field_should_be_altered(self, old_field, new_field):
_, old_path, old_args, old_kwargs = old_field.deconstruct()
_, new_path, new_args, new_kwargs = new_field.deconstruct()
# Don't alter when:
# - changing only a field name
# - changing an attribute that doesn't affect the schema
# - adding only a db_column and the column name is not changed
for attr in old_field.non_db_attrs:
old_kwargs.pop(attr, None)
for attr in new_field.non_db_attrs:
new_kwargs.pop(attr, None)
return self.quote_name(old_field.column) != self.quote_name(
new_field.column
) or (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs)
def _field_should_be_indexed(self, model, field):
return field.db_index and not field.unique
def _field_became_primary_key(self, old_field, new_field):
return not old_field.primary_key and new_field.primary_key
def _unique_should_be_added(self, old_field, new_field):
return (
not new_field.primary_key
and new_field.unique
and (not old_field.unique or old_field.primary_key)
)
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
table = Table(model._meta.db_table, self.quote_name)
name = self._fk_constraint_name(model, field, suffix)
column = Columns(model._meta.db_table, [field.column], self.quote_name)
to_table = Table(field.target_field.model._meta.db_table, self.quote_name)
to_column = Columns(
field.target_field.model._meta.db_table,
[field.target_field.column],
self.quote_name,
)
deferrable = self.connection.ops.deferrable_sql()
return Statement(
self.sql_create_fk,
table=table,
name=name,
column=column,
to_table=to_table,
to_column=to_column,
deferrable=deferrable,
)
def _fk_constraint_name(self, model, field, suffix):
def create_fk_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
return ForeignKeyName(
model._meta.db_table,
[field.column],
split_identifier(field.target_field.model._meta.db_table)[1],
[field.target_field.column],
suffix,
create_fk_name,
)
def _delete_fk_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_fk, model, name)
def _deferrable_constraint_sql(self, deferrable):
if deferrable is None:
return ""
if deferrable == Deferrable.DEFERRED:
return " DEFERRABLE INITIALLY DEFERRED"
if deferrable == Deferrable.IMMEDIATE:
return " DEFERRABLE INITIALLY IMMEDIATE"
def _unique_sql(
self,
model,
fields,
name,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
):
return None
if condition or include or opclasses or expressions:
# Databases support conditional, covering, and functional unique
# constraints via a unique index.
sql = self._create_unique_sql(
model,
fields,
name=name,
condition=condition,
include=include,
opclasses=opclasses,
expressions=expressions,
)
if sql:
self.deferred_sql.append(sql)
return None
constraint = self.sql_unique_constraint % {
"columns": ", ".join([self.quote_name(field.column) for field in fields]),
"deferrable": self._deferrable_constraint_sql(deferrable),
}
return self.sql_constraint % {
"name": self.quote_name(name),
"constraint": constraint,
}
def _create_unique_sql(
self,
model,
fields,
name=None,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
(
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
)
or (condition and not self.connection.features.supports_partial_indexes)
or (include and not self.connection.features.supports_covering_indexes)
or (
expressions and not self.connection.features.supports_expression_indexes
)
):
return None
compiler = Query(model, alias_cols=False).get_compiler(
connection=self.connection
)
table = model._meta.db_table
columns = [field.column for field in fields]
if name is None:
name = self._unique_constraint_name(table, columns, quote=True)
else:
name = self.quote_name(name)
if condition or include or opclasses or expressions:
sql = self.sql_create_unique_index
else:
sql = self.sql_create_unique
if columns:
columns = self._index_columns(
table, columns, col_suffixes=(), opclasses=opclasses
)
else:
columns = Expressions(table, expressions, compiler, self.quote_value)
return Statement(
sql,
table=Table(table, self.quote_name),
name=name,
columns=columns,
condition=self._index_condition_sql(condition),
deferrable=self._deferrable_constraint_sql(deferrable),
include=self._index_include_sql(model, include),
)
def _unique_constraint_name(self, table, columns, quote=True):
if quote:
def create_unique_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
else:
create_unique_name = self._create_index_name
return IndexName(table, columns, "_uniq", create_unique_name)
def _delete_unique_sql(
self,
model,
name,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
(
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
)
or (condition and not self.connection.features.supports_partial_indexes)
or (include and not self.connection.features.supports_covering_indexes)
or (
expressions and not self.connection.features.supports_expression_indexes
)
):
return None
if condition or include or opclasses or expressions:
sql = self.sql_delete_index
else:
sql = self.sql_delete_unique
return self._delete_constraint_sql(sql, model, name)
def _check_sql(self, name, check):
return self.sql_constraint % {
"name": self.quote_name(name),
"constraint": self.sql_check_constraint % {"check": check},
}
def _create_check_sql(self, model, name, check):
return Statement(
self.sql_create_check,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
check=check,
)
def _delete_check_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_check, model, name)
def _delete_constraint_sql(self, template, model, name):
return Statement(
template,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _constraint_names(
self,
model,
column_names=None,
unique=None,
primary_key=None,
index=None,
foreign_key=None,
check=None,
type_=None,
exclude=None,
):
"""Return all constraint names matching the columns and conditions."""
if column_names is not None:
column_names = [
self.connection.introspection.identifier_converter(name)
for name in column_names
]
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(
cursor, model._meta.db_table
)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict["columns"]:
if unique is not None and infodict["unique"] != unique:
continue
if primary_key is not None and infodict["primary_key"] != primary_key:
continue
if index is not None and infodict["index"] != index:
continue
if check is not None and infodict["check"] != check:
continue
if foreign_key is not None and not infodict["foreign_key"]:
continue
if type_ is not None and infodict["type"] != type_:
continue
if not exclude or name not in exclude:
result.append(name)
return result
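    # Typical usage (a sketch): fetch the unique constraints covering one
    # column so they can be dropped by name, e.g.
    #   for name in self._constraint_names(model, ["email"], unique=True):
    #       self.execute(self._delete_unique_sql(model, name))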
def _delete_primary_key(self, model, strict=False):
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of PK constraints for %s"
% (
len(constraint_names),
model._meta.db_table,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_primary_key_sql(model, constraint_name))
def _create_primary_key_sql(self, model, field):
return Statement(
self.sql_create_pk,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(
self._create_index_name(
model._meta.db_table, [field.column], suffix="_pk"
)
),
columns=Columns(model._meta.db_table, [field.column], self.quote_name),
)
def _delete_primary_key_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_pk, model, name)
def _collate_sql(self, collation):
return "COLLATE " + self.quote_name(collation)
def remove_procedure(self, procedure_name, param_types=()):
sql = self.sql_delete_procedure % {
"procedure": self.quote_name(procedure_name),
"param_types": ",".join(param_types),
}
self.execute(sql)
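    # For example (a sketch): on PostgreSQL, whose template is
    # "DROP FUNCTION %(procedure)s(%(param_types)s)",
    # remove_procedure("make_data", ["integer"]) executes
    #   DROP FUNCTION "make_data"(integer)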
|
35e3ebd24480faa61163b187d840cf2c632d4166af521e8e551b7e4bfa2bed34 | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
from django.db.backends.utils import strip_quotes
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# Setting all constraints to IMMEDIATE to allow changing data in the same
# transaction.
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
"; SET CONSTRAINTS ALL IMMEDIATE"
)
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_index_concurrently = (
"CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s"
# Setting the constraint to IMMEDIATE to allow changing data in the same
# transaction.
sql_create_column_inline_fk = (
"CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s"
"; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE"
)
# Setting the constraint to IMMEDIATE runs any deferred checks to allow
# dropping it in the same transaction.
sql_delete_fk = (
"SET CONSTRAINTS %(name)s IMMEDIATE; "
"ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
)
sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)"
sql_add_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD "
"GENERATED BY DEFAULT AS IDENTITY"
)
    sql_drop_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS"
)
def quote_value(self, value):
if isinstance(value, str):
value = value.replace("%", "%%")
adapted = psycopg2.extensions.adapt(value)
if hasattr(adapted, "encoding"):
adapted.encoding = "utf8"
# getquoted() returns a quoted bytestring of the adapted value.
return adapted.getquoted().decode()
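    # For example (values assumed for illustration): psycopg2 doubles
    # single quotes and this method doubles "%", so the result is safe to
    # interpolate into DDL strings:
    #   quote_value("O'Reilly") -> "'O''Reilly'"
    #   quote_value("100%")     -> "'100%%'"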
def _field_indexes_sql(self, model, field):
output = super()._field_indexes_sql(model, field)
like_index_statement = self._create_like_index_sql(model, field)
if like_index_statement is not None:
output.append(like_index_statement)
return output
def _field_data_type(self, field):
if field.is_relation:
return field.rel_db_type(self.connection)
return self.connection.data_types.get(
field.get_internal_type(),
field.db_type(self.connection),
)
def _field_base_data_types(self, field):
# Yield base data types for array fields.
if field.base_field.get_internal_type() == "ArrayField":
yield from self._field_base_data_types(field.base_field)
else:
yield self._field_data_type(field.base_field)
def _create_like_index_sql(self, model, field):
"""
        Return the statement to create an index with a varchar or text
        pattern operator class when the column type is 'varchar' or 'text';
        otherwise return None.
"""
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if "[" in db_type:
return None
            # Non-deterministic collations on PostgreSQL don't support indexes
            # for operator classes varchar_pattern_ops/text_pattern_ops.
if getattr(field, "db_collation", None):
return None
if db_type.startswith("varchar"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["varchar_pattern_ops"],
)
elif db_type.startswith("text"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["text_pattern_ops"],
)
return None
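    # Sketch of the effect (index names illustrative): a CharField "name"
    # with db_index=True gets two indexes on PostgreSQL, e.g.
    # "app_model_name_1a2b3c4d" and "app_model_name_1a2b3c4d_like"
    # (varchar_pattern_ops); the second one lets LIKE 'prefix%' lookups
    # use an index outside the C locale.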
def _using_sql(self, new_field, old_field):
using_sql = " USING %(column)s::%(type)s"
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
if new_internal_type == "ArrayField" and new_internal_type == old_internal_type:
# Compare base data types for array fields.
if list(self._field_base_data_types(old_field)) != list(
self._field_base_data_types(new_field)
):
return using_sql
elif self._field_data_type(old_field) != self._field_data_type(new_field):
return using_sql
return ""
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
self.sql_alter_column_type += using_sql
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
# Make ALTER TYPE with IDENTITY make sense.
table = strip_quotes(model._meta.db_table)
auto_field_types = {
"AutoField",
"BigAutoField",
"SmallAutoField",
}
old_is_auto = old_internal_type in auto_field_types
new_is_auto = new_internal_type in auto_field_types
if new_is_auto and not old_is_auto:
column = strip_quotes(new_field.column)
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(column),
"type": new_type,
},
[],
),
[
(
self.sql_add_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(column),
},
[],
),
],
)
elif old_is_auto and not new_is_auto:
            # Drop IDENTITY if it exists (pre-Django 4.1 serial columns
            # don't have it).
self.execute(
                self.sql_drop_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(strip_quotes(old_field.column)),
}
)
column = strip_quotes(new_field.column)
sequence_name = "%s_%s_seq" % (table, column)
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
return fragment, [
(
# Drop the sequence if exists (Django 4.1+ identity columns
# don't have it).
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
),
]
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
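    # Illustrative SQL (a sketch; table/column names assumed) for turning
    # an AutoField "id" into a plain IntegerField:
    #   ALTER TABLE "t" ALTER COLUMN "id" DROP IDENTITY IF EXISTS
    #   ALTER TABLE "t" ALTER COLUMN "id" TYPE integer
    #   DROP SEQUENCE IF EXISTS "t_id_seq" CASCADE
    # The reverse direction instead runs sql_add_identity after the type
    # change.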
def _alter_column_collation_sql(
self, model, new_field, new_type, new_collation, old_field
):
sql = self.sql_alter_column_collate
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
sql += using_sql
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": " " + self._collate_sql(new_collation)
if new_collation
else "",
},
[],
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
super()._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
# Added an index? Create any PostgreSQL-specific indexes.
if (not (old_field.db_index or old_field.unique) and new_field.db_index) or (
not old_field.unique and new_field.unique
):
like_index_statement = self._create_like_index_sql(model, new_field)
if like_index_statement is not None:
self.execute(like_index_statement)
# Removed an index? Drop any PostgreSQL-specific indexes.
if old_field.unique and not (new_field.db_index or new_field.unique):
index_to_remove = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_to_remove))
def _index_columns(self, table, columns, col_suffixes, opclasses):
if opclasses:
return IndexColumns(
table,
columns,
self.quote_name,
col_suffixes=col_suffixes,
opclasses=opclasses,
)
return super()._index_columns(table, columns, col_suffixes, opclasses)
def add_index(self, model, index, concurrently=False):
self.execute(
index.create_sql(model, self, concurrently=concurrently), params=None
)
def remove_index(self, model, index, concurrently=False):
self.execute(index.remove_sql(model, self, concurrently=concurrently))
def _delete_index_sql(self, model, name, sql=None, concurrently=False):
sql = (
self.sql_delete_index_concurrently
if concurrently
else self.sql_delete_index
)
return super()._delete_index_sql(model, name, sql)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
concurrently=False,
include=None,
expressions=None,
):
sql = (
self.sql_create_index
if not concurrently
else self.sql_create_index_concurrently
)
return super()._create_index_sql(
model,
fields=fields,
name=name,
suffix=suffix,
using=using,
db_tablespace=db_tablespace,
col_suffixes=col_suffixes,
sql=sql,
opclasses=opclasses,
condition=condition,
include=include,
expressions=expressions,
)
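    # Usage sketch (model/index names assumed): building an index without
    # long locks on a busy table requires a non-atomic schema editor:
    #   index = Index(fields=["name"], name="app_model_name_idx")
    #   with connection.schema_editor(atomic=False) as editor:
    #       editor.add_index(MyModel, index, concurrently=True)
    # which renders via sql_create_index_concurrently above.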
|
db1704647a34449e9da7d5cc53a43609c0008bf150c2e67751bca8507c1f3bc9 | import datetime
from unittest import mock
from django.contrib.postgres.indexes import OpClass
from django.core.exceptions import ValidationError
from django.db import IntegrityError, NotSupportedError, connection, transaction
from django.db.models import (
CheckConstraint,
Deferrable,
F,
Func,
IntegerField,
Q,
UniqueConstraint,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Left, Lower
from django.test import ignore_warnings, modify_settings, skipUnlessDBFeature
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from . import PostgreSQLTestCase
from .models import HotelReservation, IntegerArrayModel, RangesModel, Room, Scene
try:
from psycopg2.extras import DateRange, NumericRange
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import (
DateTimeRangeField,
RangeBoundary,
RangeOperators,
)
except ImportError:
pass
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class SchemaTests(PostgreSQLTestCase):
    get_opclass_query = """
        SELECT opcname, c.relname FROM pg_opclass AS oc
        JOIN pg_index AS i ON oc.oid = ANY(i.indclass)
        JOIN pg_class AS c ON c.oid = i.indexrelid
        WHERE c.relname = %s
    """
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = "ints_between"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_array_contains(self):
constraint = CheckConstraint(
check=Q(field__contains=[1]),
name="array_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(IntegerArrayModel, IntegerArrayModel())
constraint.validate(IntegerArrayModel, IntegerArrayModel(field=[1]))
def test_check_constraint_daterange_contains(self):
constraint_name = "dates_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(dates__contains=F("dates_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = "timestamps_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(timestamps__contains=F("timestamps_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_check_constraint_range_contains(self):
constraint = CheckConstraint(
check=Q(ints__contains=(1, 5)),
name="ints_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(6, 10)))
def test_check_constraint_range_lower_upper(self):
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0) & Q(ints__endswith__lte=99),
name="ints_range_lower_upper",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(-1, 20)))
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(0, 100)))
constraint.validate(RangesModel, RangesModel(ints=(0, 99)))
def test_check_constraint_range_lower_with_nulls(self):
constraint = CheckConstraint(
check=Q(ints__isnull=True) | Q(ints__startswith__gte=0),
name="ints_optional_positive_range",
)
constraint.validate(RangesModel, RangesModel())
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0),
name="ints_positive_range",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel())
def test_opclass(self):
constraint = UniqueConstraint(
name="test_opclass",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name="test_opclass_multiple",
fields=["scene", "setting"],
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
("varchar_pattern_ops", constraint.name),
("text_pattern_ops", constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name="test_opclass_partial",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_opclass_include(self):
constraint = UniqueConstraint(
name="test_opclass_include",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
include=["setting"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower("scene"), name="text_pattern_ops"),
name="test_opclass_func",
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("text_pattern_ops", constraint.name)],
)
Scene.objects.create(scene="Scene 10", setting="The dark forest of Ewing")
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
Scene.objects.create(scene="Scene 5", setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_invalid_condition(self):
msg = "ExclusionConstraint.condition must be a Q instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_condition",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
condition=F("invalid"),
)
def test_invalid_index_type(self):
msg = "Exclusion constraints only support GiST or SP-GiST indexes."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="gin",
name="exclude_invalid_index_type",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
)
def test_invalid_expressions(self):
msg = "The expressions must be a list of 2-tuples."
for expressions in (["foo"], [("foo")], [("foo_1", "foo_2", "foo_3")]):
with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_expressions",
expressions=expressions,
)
def test_empty_expressions(self):
msg = "At least one expression is required to define an exclusion constraint."
for empty_expressions in (None, []):
with self.subTest(empty_expressions), self.assertRaisesMessage(
ValueError, msg
):
ExclusionConstraint(
index_type="GIST",
name="exclude_empty_expressions",
expressions=empty_expressions,
)
def test_invalid_deferrable(self):
msg = "ExclusionConstraint.deferrable must be a Deferrable instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_deferrable",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
deferrable="invalid",
)
def test_deferrable_with_condition(self):
msg = "ExclusionConstraint with conditions cannot be deferred."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_condition",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
deferrable=Deferrable.DEFERRED,
)
def test_invalid_include_type(self):
msg = "ExclusionConstraint.include must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_include",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
include="invalid",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_invalid_opclasses_type(self):
msg = "ExclusionConstraint.opclasses must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_opclasses",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses="invalid",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_opclasses_and_expressions_same_length(self):
msg = (
"ExclusionConstraint.expressions and "
"ExclusionConstraint.opclasses must have the same number of "
"elements."
)
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_expressions_opclasses_length",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses=["foo", "bar"],
)
def test_repr(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
condition=Q(cancelled=False),
index_type="SPGiST",
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='SPGiST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"condition=(AND: ('cancelled', False))>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"deferrable=Deferrable.IMMEDIATE>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
include=["cancelled", "room"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"include=('cancelled', 'room')>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(OpClass("datespan", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(OpClass(F(datespan), name=range_ops), '-|-')] "
"name='exclude_overlapping'>",
)
def test_eq(self):
constraint_1 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
constraint_2 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
constraint_3 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
)
constraint_4 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.DEFERRED,
)
constraint_5 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
)
constraint_6 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
include=["cancelled"],
)
constraint_7 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
)
with ignore_warnings(category=RemovedInDjango50Warning):
constraint_8 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
opclasses=["range_ops", "range_ops"],
)
constraint_9 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
opclasses=["range_ops", "range_ops"],
)
self.assertNotEqual(constraint_2, constraint_9)
self.assertNotEqual(constraint_7, constraint_8)
constraint_10 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="custom error",
)
constraint_11 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="other custom error",
)
self.assertEqual(constraint_1, constraint_1)
self.assertEqual(constraint_1, mock.ANY)
self.assertNotEqual(constraint_1, constraint_2)
self.assertNotEqual(constraint_1, constraint_3)
self.assertNotEqual(constraint_1, constraint_4)
self.assertNotEqual(constraint_1, constraint_10)
self.assertNotEqual(constraint_2, constraint_3)
self.assertNotEqual(constraint_2, constraint_4)
self.assertNotEqual(constraint_2, constraint_7)
self.assertNotEqual(constraint_4, constraint_5)
self.assertNotEqual(constraint_5, constraint_6)
self.assertNotEqual(constraint_1, object())
self.assertNotEqual(constraint_10, constraint_11)
self.assertEqual(constraint_10, constraint_10)
def test_deconstruct(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_index_type(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
index_type="SPGIST",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"index_type": "SPGIST",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_condition(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
"condition": Q(cancelled=False),
},
)
def test_deconstruct_deferrable(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
deferrable=Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"deferrable": Deferrable.DEFERRED,
},
)
def test_deconstruct_include(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
include=["cancelled", "room"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"include": ("cancelled", "room"),
},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_deconstruct_opclasses(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
opclasses=["range_ops"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"opclasses": ["range_ops"],
},
)
def _test_range_overlaps(self, constraint):
# Create exclusion constraint.
self.assertNotIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
# Add initial reservations.
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 26),
timezone.datetime(2018, 6, 28),
timezone.datetime(2018, 6, 29),
]
reservation = HotelReservation.objects.create(
datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
)
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.create(
datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
start=datetimes[1],
end=datetimes[3],
room=room102,
)
HotelReservation.objects.create(
datespan=DateRange(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
cancelled=True,
)
# Overlap dates.
with self.assertRaises(IntegrityError), transaction.atomic():
reservation = HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(HotelReservation, reservation)
reservation.save()
# Valid range.
other_valid_reservations = [
# Other room.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room101,
),
# Cancelled reservation.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[1].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
cancelled=True,
),
# Other adjacent dates.
HotelReservation(
datespan=(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
),
]
for reservation in other_valid_reservations:
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.bulk_create(other_valid_reservations)
# Excluded fields.
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"room"},
)
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"datespan", "start", "end", "room"},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_overlaps_custom_opclasses(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom",
expressions=[
(TsTzRange("start", "end", RangeBoundary()), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
opclasses=["range_ops", "gist_int4_ops"],
)
self._test_range_overlaps(constraint)
def test_range_overlaps_custom(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom_opclass",
expressions=[
(
OpClass(TsTzRange("start", "end", RangeBoundary()), "range_ops"),
RangeOperators.OVERLAPS,
),
(OpClass("room", "gist_int4_ops"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_overlaps(self):
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_adjacent(self):
constraint_name = "ints_adjacent"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_validate_range_adjacent(self):
constraint = ExclusionConstraint(
name="ints_adjacent",
expressions=[("ints", RangeOperators.ADJACENT_TO)],
violation_error_message="Custom error message.",
)
range_obj = RangesModel.objects.create(ints=(20, 50))
constraint.validate(RangesModel, range_obj)
msg = "Custom error message."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(10, 20)))
constraint.validate(RangesModel, RangesModel(ints=(10, 19)))
constraint.validate(RangesModel, RangesModel(ints=(51, 60)))
constraint.validate(RangesModel, RangesModel(ints=(10, 20)), exclude={"ints"})
def test_expressions_with_params(self):
constraint_name = "scene_left_equal"
self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Left("scene", 4), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table))
def test_expressions_with_key_transform(self):
constraint_name = "exclude_overlapping_reservations_smoking"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(KeyTextTransform("smoking", "requirements"), RangeOperators.EQUAL),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint_name,
self.get_constraints(HotelReservation._meta.db_table),
)
def test_index_transform(self):
constraint_name = "first_index_equal"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("field__0", RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(IntegerArrayModel, constraint)
self.assertIn(
constraint_name,
self.get_constraints(IntegerArrayModel._meta.db_table),
)
def test_range_adjacent_initially_deferred(self):
constraint_name = "ints_adjacent_deferred"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
def test_range_adjacent_gist_include(self):
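        # include adds non-key columns to the backing GiST index so that
        # index-only scans can serve them; covering GiST indexes require
        # PostgreSQL 12+.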
constraint_name = "ints_adjacent_gist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include(self):
constraint_name = "ints_adjacent_spgist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
def test_range_adjacent_gist_include_condition(self):
constraint_name = "ints_adjacent_gist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_condition(self):
constraint_name = "ints_adjacent_spgist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_include_deferrable(self):
constraint_name = "ints_adjacent_gist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_deferrable(self):
constraint_name = "ints_adjacent_spgist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_spgist_include_not_supported(self):
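        # Covering SP-GiST indexes require PostgreSQL 14+; mocking the
        # feature flag to False simulates an older server, which must
        # raise NotSupportedError at DDL time.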
constraint_name = "ints_adjacent_spgist_include_not_supported"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["id"],
)
msg = (
"Covering exclusion constraints using an SP-GiST index require "
"PostgreSQL 14+."
)
with connection.schema_editor() as editor:
with mock.patch(
"django.db.backends.postgresql.features.DatabaseFeatures."
"supports_covering_spgist_indexes",
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_range_adjacent_opclass(self):
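        # OpClass pins a specific operator class on the indexed
        # expression; get_opclass_query is reused below to introspect
        # which operator class the backing index actually uses.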
constraint_name = "ints_adjacent_opclass"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint_name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint_name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_range_adjacent_opclass_condition(self):
constraint_name = "ints_adjacent_opclass_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclass_deferrable(self):
constraint_name = "ints_adjacent_opclass_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_opclass_include(self):
constraint_name = "ints_adjacent_gist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="gist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclass_include(self):
constraint_name = "ints_adjacent_spgist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="spgist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_equal_cast(self):
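        # ExclusionConstraint.expressions accepts arbitrary database
        # expressions such as Cast(), not just field names or transforms.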
constraint_name = "exclusion_equal_room_cast"
self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Cast("number", IntegerField()), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Room, constraint)
self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table))
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintOpclassesDeprecationTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_warning(self):
msg = (
"The opclasses argument is deprecated in favor of using "
"django.contrib.postgres.indexes.OpClass in "
"ExclusionConstraint.expressions."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_repr(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"opclasses=['range_ops']>",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses(self):
constraint_name = "ints_adjacent_opclasses"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint.name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_condition(self):
constraint_name = "ints_adjacent_opclasses_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_deferrable(self):
constraint_name = "ints_adjacent_opclasses_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_gist_opclasses_include(self):
constraint_name = "ints_adjacent_gist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclasses_include(self):
constraint_name = "ints_adjacent_spgist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
import unittest
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
BinaryField,
BooleanField,
Case,
Count,
DecimalField,
F,
GenericIPAddressField,
IntegerField,
Max,
Min,
Q,
Sum,
TextField,
Value,
When,
)
from django.test import SimpleTestCase, TestCase
from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel
try:
from PIL import Image
except ImportError:
Image = None
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
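        # Seven rows with integer values [1, 2, 3, 2, 3, 3, 4]; most
        # expected lists below are (integer, ...) tuples in this creation
        # (pk) order.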
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string="2")
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string="2")
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string="4")
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
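        # Fields that every backend can GROUP BY: reverse auto-created
        # relations are skipped, as are BLOB/TEXT columns on backends
        # that can't group by LOB columns.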
cls.group_by_fields = [
f.name
for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created)
and (
connection.features.allows_group_by_lob
or not isinstance(f, (BinaryField, TextField))
)
]
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "other"),
(2, "two"),
(3, "other"),
(3, "other"),
(4, "other"),
],
transform=attrgetter("integer", "test"),
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
)
).order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "test"),
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_test=Case(
When(integer=1, then=F("integer") + 1),
When(integer=2, then=F("integer") + 3),
default="integer",
)
).order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_test=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("integer") + 1, then=Value("+1")),
)
).order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(integer=1, then=F("o2o_rel__integer") + 1),
When(integer=2, then=F("o2o_rel__integer") + 3),
default="o2o_rel__integer",
)
).order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_in_clause(self):
fk_rels = FKCaseTestModel.objects.filter(integer__in=[5])
self.assertQuerysetEqual(
CaseTestModel.objects.only("pk", "integer")
.annotate(
in_test=Sum(
Case(
When(fk_rel__in=fk_rels, then=F("fk_rel__integer")),
default=Value(0),
)
)
)
.order_by("pk"),
[(1, 0), (2, 0), (3, 0), (2, 0), (3, 0), (3, 0), (4, 5)],
transform=attrgetter("integer", "in_test"),
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(integer2=F("o2o_rel__integer"), then=Value("equal")),
When(integer2=F("o2o_rel__integer") + 1, then=Value("+1")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "other"),
],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(o2o_rel__integer=1, then=Value("one")),
When(o2o_rel__integer=2, then=Value("two")),
When(o2o_rel__integer=3, then=Value("three")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "three"),
(2, "two"),
(3, "three"),
(3, "three"),
(4, "one"),
],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
f_plus_3=F("integer") + 3,
)
.annotate(
f_test=Case(
When(integer=1, then="f_plus_1"),
When(integer=2, then="f_plus_3"),
default="integer",
),
)
.order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.annotate(
f_test=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("f_plus_1"), then=Value("+1")),
),
)
.order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F("integer") - 2,
)
.annotate(
test=Case(
When(f_minus_2=-1, then=Value("negative one")),
When(f_minus_2=0, then=Value("zero")),
When(f_minus_2=1, then=Value("one")),
default=Value("other"),
),
)
.order_by("pk"),
[
(1, "negative one"),
(2, "zero"),
(3, "one"),
(2, "zero"),
(3, "one"),
(3, "one"),
(4, "other"),
],
transform=attrgetter("integer", "test"),
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(integer=2, then="min"),
When(integer=3, then="max"),
),
)
.order_by("pk"),
[
(1, None, 1, 1),
(2, 2, 2, 3),
(3, 4, 3, 4),
(2, 2, 2, 3),
(3, 4, 3, 4),
(3, 4, 3, 4),
(4, None, 5, 5),
],
transform=itemgetter("integer", "test", "min", "max"),
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(integer2=F("min"), then=Value("min")),
When(integer2=F("max"), then=Value("max")),
),
)
.order_by("pk"),
[
(1, 1, "min"),
(2, 3, "max"),
(3, 4, "max"),
(2, 2, "min"),
(3, 4, "max"),
(3, 3, "min"),
(4, 5, "min"),
],
transform=itemgetter("integer", "integer2", "test"),
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(max=3, then=Value("max = 3")),
When(max=4, then=Value("max = 4")),
default=Value(""),
),
)
.order_by("pk"),
[
(1, 1, ""),
(2, 3, "max = 3"),
(3, 4, "max = 4"),
(2, 3, "max = 3"),
(3, 4, "max = 4"),
(3, 4, "max = 4"),
(4, 5, ""),
],
transform=itemgetter("integer", "max", "test"),
)
def test_annotate_exclude(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
)
)
.exclude(test="other")
.order_by("pk"),
[(1, "one"), (2, "two"), (2, "two")],
transform=attrgetter("integer", "test"),
)
def test_annotate_filter_decimal(self):
obj = CaseTestModel.objects.create(integer=0, decimal=Decimal("1"))
qs = CaseTestModel.objects.annotate(
x=Case(When(integer=0, then=F("decimal"))),
y=Case(When(integer=0, then=Value(Decimal("1")))),
)
self.assertSequenceEqual(qs.filter(Q(x=1) & Q(x=Decimal("1"))), [obj])
self.assertSequenceEqual(qs.filter(Q(y=1) & Q(y=Decimal("1"))), [obj])
def test_annotate_values_not_in_order_by(self):
self.assertEqual(
list(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
When(integer=3, then=Value("three")),
default=Value("other"),
)
)
.order_by("test")
.values_list("integer", flat=True)
),
[1, 4, 3, 3, 3, 2, 2],
)
def test_annotate_with_empty_when(self):
objects = CaseTestModel.objects.annotate(
selected=Case(
When(pk__in=[], then=Value("selected")),
default=Value("not selected"),
)
)
self.assertEqual(len(objects), CaseTestModel.objects.count())
self.assertTrue(all(obj.selected == "not selected" for obj in objects))
def test_annotate_with_full_when(self):
objects = CaseTestModel.objects.annotate(
selected=Case(
When(~Q(pk__in=[]), then=Value("selected")),
default=Value("not selected"),
)
)
self.assertEqual(len(objects), CaseTestModel.objects.count())
self.assertTrue(all(obj.selected == "selected" for obj in objects))
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
+ 1,
).order_by("pk"),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter("integer", "test"),
)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F("integer2"), then="pk"),
When(integer=4, then="pk"),
),
).values("test")
).order_by("pk"),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter("integer", "integer2"),
)
def test_condition_with_lookups(self):
qs = CaseTestModel.objects.annotate(
test=Case(
When(Q(integer2=1), string="2", then=Value(False)),
When(Q(integer2=1), string="1", then=Value(True)),
default=Value(False),
output_field=BooleanField(),
),
)
self.assertIs(qs.get(integer=1).test, True)
def test_case_reuse(self):
SOME_CASE = Case(
When(pk=0, then=Value("0")),
default=Value("1"),
)
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by("pk"),
CaseTestModel.objects.annotate(somecase=SOME_CASE)
.order_by("pk")
.values_list("pk", "somecase"),
lambda x: (x.pk, x.somecase),
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(
Case(
When(integer=1, then=1),
)
),
two=Sum(
Case(
When(integer=2, then=1),
)
),
three=Sum(
Case(
When(integer=3, then=1),
)
),
four=Sum(
Case(
When(integer=4, then=1),
)
),
),
{"one": 1, "two": 2, "three": 3, "four": 1},
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(Case(When(integer=1, then="integer"))),
two=Sum(Case(When(integer=2, then=F("integer") - 1))),
three=Sum(Case(When(integer=3, then=F("integer") + 1))),
),
{"one": 1, "two": 2, "three": 12},
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=Sum(
Case(
When(integer2=F("integer"), then=1),
)
),
plus_one=Sum(
Case(
When(integer2=F("integer") + 1, then=1),
)
),
),
{"equal": 3, "plus_one": 4},
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
)
).order_by("pk"),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=F("integer") + 1),
When(integer=3, then=F("integer")),
default="integer",
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
string=Case(
When(integer2=F("integer"), then=Value("2")),
When(integer2=F("integer") + 1, then=Value("3")),
)
).order_by("pk"),
[(3, 4, "3"), (2, 2, "2"), (3, 4, "3")],
transform=attrgetter("integer", "integer2", "string"),
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=F("o2o_rel__integer") + 1),
When(integer=3, then=F("o2o_rel__integer")),
default="o2o_rel__integer",
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer=Case(
When(integer2=F("o2o_rel__integer") + 1, then=2),
When(integer2=F("o2o_rel__integer"), then=3),
)
).order_by("pk"),
[(2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F("integer"),
f_plus_1=F("integer") + 1,
)
.filter(
integer2=Case(
When(integer=2, then="f_plus_1"),
When(integer=3, then="f"),
),
)
.order_by("pk"),
[(2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.filter(
integer=Case(
When(integer2=F("integer"), then=2),
When(integer2=F("f_plus_1"), then=3),
),
)
.order_by("pk"),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
),
)
.order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.filter(
integer2=Case(
When(integer=2, then="min"),
When(integer=3, then="max"),
),
)
.order_by("pk"),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter("integer", "integer2", "min", "max"),
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.filter(
integer=Case(
When(integer2=F("min"), then=2),
When(integer2=F("max"), then=3),
),
)
.order_by("pk"),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter("integer", "integer2", "min", "max"),
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
max=Max("fk_rel__integer"),
)
.filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
)
.order_by("pk"),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter("integer", "integer2", "max"),
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "other"),
(2, "two"),
(3, "other"),
(3, "other"),
(4, "other"),
],
transform=attrgetter("integer", "string"),
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "integer2"),
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F("integer") + 1),
When(integer=2, then=F("integer") + 3),
default="integer",
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[("1", 2), ("2", 5), ("3", 3), ("2", 5), ("3", 3), ("3", 3), ("4", 4)],
transform=attrgetter("string", "integer"),
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("integer") + 1, then=Value("+1")),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "string"),
)
def test_update_with_join_in_condition_raise_field_error(self):
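        # SQL UPDATE targets a single table, so conditions that would
        # require joining a related table are rejected with a FieldError.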
with self.assertRaisesMessage(
FieldError, "Joined field references are not permitted in this query"
):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F("o2o_rel__integer") + 1, then=2),
When(integer2=F("o2o_rel__integer"), then=3),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(
FieldError, "Joined field references are not permitted in this query"
):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value("one")),
When(o2o_rel__integer=2, then=Value("two")),
When(o2o_rel__integer=3, then=Value("three")),
default=Value("other"),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "big_integer"),
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
When(integer=1, then=b"one"),
When(integer=2, then=b"two"),
default=b"",
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, b"one"),
(2, b"two"),
(3, b""),
(2, b"two"),
(3, b""),
(3, b""),
(4, b""),
],
transform=lambda o: (o.integer, bytes(o.binary)),
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, True),
(2, True),
(3, False),
(2, True),
(3, False),
(3, False),
(4, False),
],
transform=attrgetter("integer", "boolean"),
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, date(2015, 1, 1)),
(2, date(2015, 1, 2)),
(3, None),
(2, date(2015, 1, 2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "date"),
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, datetime(2015, 1, 1)),
(2, datetime(2015, 1, 2)),
(3, None),
(2, datetime(2015, 1, 2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "date_time"),
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal("1.1")),
When(
integer=2, then=Value(Decimal("2.2"), output_field=DecimalField())
),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, Decimal("1.1")),
(2, Decimal("2.2")),
(3, None),
(2, Decimal("2.2")),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "decimal"),
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
When(integer=1, then=timedelta(1)),
When(integer=2, then=timedelta(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, timedelta(1)),
(2, timedelta(2)),
(3, None),
(2, timedelta(2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "duration"),
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value("[email protected]")),
When(integer=2, then=Value("[email protected]")),
default=Value(""),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "[email protected]"),
(2, "[email protected]"),
(3, ""),
(2, "[email protected]"),
(3, ""),
(3, ""),
(4, ""),
],
transform=attrgetter("integer", "email"),
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=lambda o: (o.integer, str(o.file)),
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
default=Value(""),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "file_path"),
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "float"),
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=lambda o: (o.integer, str(o.image)),
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
When(integer=1, then=Value("1.1.1.1")),
When(integer=2, then=Value("2.2.2.2")),
output_field=GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "1.1.1.1"),
(2, "2.2.2.2"),
(3, None),
(2, "2.2.2.2"),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "generic_ip_address"),
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, True),
(2, False),
(3, None),
(2, False),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "null_boolean"),
)
def test_update_positive_big_integer(self):
CaseTestModel.objects.update(
positive_big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_big_integer"),
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_integer"),
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_small_integer"),
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
default=Value(""),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "1"), (2, "2"), (3, ""), (2, "2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "slug"),
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "small_integer"),
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=["1", "2"]).update(
string=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=["1", "2"]).order_by("pk"),
[(1, "1"), (2, "2"), (2, "2")],
transform=attrgetter("integer", "string"),
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
default=Value(""),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "1"), (2, "2"), (3, ""), (2, "2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "text"),
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
When(integer=1, then=time(1)),
When(integer=2, then=time(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, time(1)),
(2, time(2)),
(3, None),
(2, time(2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "time"),
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value("http://1.example.com/")),
When(integer=2, then=Value("http://2.example.com/")),
default=Value(""),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "http://1.example.com/"),
(2, "http://2.example.com/"),
(3, ""),
(2, "http://2.example.com/"),
(3, ""),
(3, ""),
(4, ""),
],
transform=attrgetter("integer", "url"),
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
When(integer=1, then=UUID("11111111111111111111111111111111")),
When(integer=2, then=UUID("22222222222222222222222222222222")),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, UUID("11111111111111111111111111111111")),
(2, UUID("22222222222222222222222222222222")),
(3, None),
(2, UUID("22222222222222222222222222222222")),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "uuid"),
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, obj1.pk),
(2, obj2.pk),
(3, None),
(2, obj2.pk),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "fk_id"),
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value("less than 2")),
When(integer__gt=2, then=Value("greater than 2")),
default=Value("equal to 2"),
),
).order_by("pk"),
[
(1, "less than 2"),
(2, "equal to 2"),
(3, "greater than 2"),
(2, "equal to 2"),
(3, "greater than 2"),
(3, "greater than 2"),
(4, "greater than 2"),
],
transform=attrgetter("integer", "test"),
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value("when")),
default=Value("default"),
),
).order_by("pk"),
[
(1, 1, "default"),
(2, 3, "when"),
(3, 4, "default"),
(2, 2, "default"),
(3, 4, "default"),
(3, 3, "default"),
(4, 5, "default"),
],
transform=attrgetter("integer", "integer2", "test"),
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value("when")),
default=Value("default"),
),
).order_by("pk"),
[
(1, 1, "default"),
(2, 3, "when"),
(3, 4, "default"),
(2, 2, "when"),
(3, 4, "default"),
(3, 3, "when"),
(4, 5, "default"),
],
transform=attrgetter("integer", "integer2", "test"),
)
def test_order_by_conditional_implicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2)
.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
)
.order_by("test", "pk"),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter("integer", "test"),
)
def test_order_by_conditional_explicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2)
.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
)
.order_by(F("test").asc(), "pk"),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter("integer", "test"),
)
def test_join_promotion(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
),
[(o, 3)],
lambda x: (x, x.foo),
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
),
[(o, 2)],
lambda x: (x, x.foo),
)
def test_join_promotion_multiple_annotations(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
bar=Case(
When(fk_rel__pk=1, then=4),
default=5,
),
),
[(o, 3, 5)],
lambda x: (x, x.foo, x.bar),
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
bar=Case(
When(fk_rel__isnull=True, then=4),
default=5,
),
),
[(o, 2, 4)],
lambda x: (x, x.foo, x.bar),
)
def test_m2m_exclude(self):
CaseTestModel.objects.create(integer=10, integer2=1, string="1")
qs = (
CaseTestModel.objects.values_list("id", "integer")
.annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.order_by("integer")
)
        # The first object hits the default=2 case because its only
        # fk_rel has integer=1. The others sum to 2 because each has two
        # fk_rel objects, except for integer=4 and integer=10 (created
        # above). The integer=4 object has a single fk_rel, so its result
        # is 1, and integer=10 has none yet also yields 1 (instead of 0)
        # because ~Q() also matches the NULLs from the LEFT JOIN.
self.assertQuerysetEqual(
qs,
[(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)],
lambda x: x[1:],
)
def test_m2m_reuse(self):
CaseTestModel.objects.create(integer=10, integer2=1, string="1")
# Need to use values before annotate so that Oracle will not group
# by fields it isn't capable of grouping by.
qs = (
CaseTestModel.objects.values_list("id", "integer")
.annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.annotate(
cnt2=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.order_by("integer")
)
self.assertEqual(str(qs.query).count(" JOIN "), 1)
self.assertQuerysetEqual(
qs,
[
(1, 2, 2),
(2, 2, 2),
(2, 2, 2),
(3, 2, 2),
(3, 2, 2),
(3, 2, 2),
(4, 1, 1),
(10, 1, 1),
],
lambda x: x[1:],
)
def test_aggregation_empty_cases(self):
tests = [
# Empty cases and default.
(Case(output_field=IntegerField()), None),
# Empty cases and a constant default.
(Case(default=Value("empty")), "empty"),
# Empty cases and column in the default.
(Case(default=F("url")), ""),
]
for case, value in tests:
with self.subTest(case=case):
self.assertQuerysetEqual(
CaseTestModel.objects.values("string")
.annotate(
case=case,
integer_sum=Sum("integer"),
)
.order_by("string"),
[
("1", value, 1),
("2", value, 4),
("3", value, 9),
("4", value, 4),
],
transform=itemgetter("string", "case", "integer_sum"),
)
class CaseDocumentationExamples(TestCase):
@classmethod
def setUpTestData(cls):
Client.objects.create(
name="Jane Doe",
account_type=Client.REGULAR,
registered_on=date.today() - timedelta(days=36),
)
Client.objects.create(
name="James Smith",
account_type=Client.GOLD,
registered_on=date.today() - timedelta(days=5),
)
Client.objects.create(
name="Jack Black",
account_type=Client.PLATINUM,
registered_on=date.today() - timedelta(days=10 * 365),
)
def test_simple_example(self):
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(account_type=Client.GOLD, then=Value("5%")),
When(account_type=Client.PLATINUM, then=Value("10%")),
default=Value("0%"),
),
).order_by("pk"),
[("Jane Doe", "0%"), ("James Smith", "5%"), ("Jack Black", "10%")],
transform=attrgetter("name", "discount"),
)
def test_lookup_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(registered_on__lte=a_year_ago, then=Value("10%")),
When(registered_on__lte=a_month_ago, then=Value("5%")),
default=Value("0%"),
),
).order_by("pk"),
[("Jane Doe", "5%"), ("James Smith", "0%"), ("Jack Black", "10%")],
transform=attrgetter("name", "discount"),
)
def test_conditional_update_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
Client.objects.update(
account_type=Case(
When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
default=Value(Client.REGULAR),
),
)
self.assertQuerysetEqual(
Client.objects.order_by("pk"),
[("Jane Doe", "G"), ("James Smith", "R"), ("Jack Black", "P")],
transform=attrgetter("name", "account_type"),
)
def test_conditional_aggregation_example(self):
Client.objects.create(
name="Jean Grey",
account_type=Client.REGULAR,
registered_on=date.today(),
)
Client.objects.create(
name="James Bond",
account_type=Client.PLATINUM,
registered_on=date.today(),
)
Client.objects.create(
name="Jane Porter",
account_type=Client.PLATINUM,
registered_on=date.today(),
)
self.assertEqual(
Client.objects.aggregate(
regular=Count("pk", filter=Q(account_type=Client.REGULAR)),
gold=Count("pk", filter=Q(account_type=Client.GOLD)),
platinum=Count("pk", filter=Q(account_type=Client.PLATINUM)),
),
{"regular": 2, "gold": 1, "platinum": 3},
)
# This was the example before the filter argument was added.
self.assertEqual(
Client.objects.aggregate(
regular=Sum(
Case(
When(account_type=Client.REGULAR, then=1),
)
),
gold=Sum(
Case(
When(account_type=Client.GOLD, then=1),
)
),
platinum=Sum(
Case(
When(account_type=Client.PLATINUM, then=1),
)
),
),
{"regular": 2, "gold": 1, "platinum": 3},
)
def test_filter_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.filter(
registered_on__lte=Case(
When(account_type=Client.GOLD, then=a_month_ago),
When(account_type=Client.PLATINUM, then=a_year_ago),
),
),
[("Jack Black", "P")],
transform=attrgetter("name", "account_type"),
)
def test_hash(self):
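        # Hashing follows expression equality: a list vs. a tuple for
        # __in hashes the same, while a different output_field or
        # different When clauses hash differently here.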
expression_1 = Case(
When(account_type__in=[Client.REGULAR, Client.GOLD], then=1),
default=2,
output_field=IntegerField(),
)
expression_2 = Case(
When(account_type__in=(Client.REGULAR, Client.GOLD), then=1),
default=2,
output_field=IntegerField(),
)
expression_3 = Case(
When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2
)
expression_4 = Case(
When(account_type__in=[Client.PLATINUM, Client.GOLD], then=2), default=1
)
self.assertEqual(hash(expression_1), hash(expression_2))
self.assertNotEqual(hash(expression_2), hash(expression_3))
self.assertNotEqual(hash(expression_1), hash(expression_4))
self.assertNotEqual(hash(expression_3), hash(expression_4))
class CaseWhenTests(SimpleTestCase):
def test_only_when_arguments(self):
msg = "Positional arguments must all be When objects."
with self.assertRaisesMessage(TypeError, msg):
Case(When(Q(pk__in=[])), object())
def test_invalid_when_constructor_args(self):
msg = (
"When() supports a Q object, a boolean expression, or lookups as "
"a condition."
)
with self.assertRaisesMessage(TypeError, msg):
When(condition=object())
with self.assertRaisesMessage(TypeError, msg):
When(condition=Value(1))
with self.assertRaisesMessage(TypeError, msg):
When(Value(1), string="1")
with self.assertRaisesMessage(TypeError, msg):
When()
def test_empty_q_object(self):
msg = "An empty Q() can't be used as a When() condition."
with self.assertRaisesMessage(ValueError, msg):
When(Q(), then=Value(True))
import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.core.exceptions import FieldError
from django.core.management.color import no_style
from django.db import (
DatabaseError,
DataError,
IntegrityError,
OperationalError,
connection,
)
from django.db.models import (
CASCADE,
PROTECT,
AutoField,
BigAutoField,
BigIntegerField,
BinaryField,
BooleanField,
CharField,
CheckConstraint,
DateField,
DateTimeField,
DecimalField,
DurationField,
F,
FloatField,
ForeignKey,
ForeignObject,
Index,
IntegerField,
JSONField,
ManyToManyField,
Model,
OneToOneField,
OrderBy,
PositiveIntegerField,
Q,
SlugField,
SmallAutoField,
SmallIntegerField,
TextField,
TimeField,
UniqueConstraint,
UUIDField,
Value,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper
from django.db.models.indexes import IndexExpression
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase,
ignore_warnings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.utils.deprecation import RemovedInDjango51Warning
from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField
from .models import (
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
AuthorWithIndexedName,
AuthorWithUniqueName,
AuthorWithUniqueNameAndBirthday,
Book,
BookForeignObj,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithoutAuthor,
BookWithSlug,
IntegerPK,
Node,
Note,
NoteRename,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
    Be aware that these tests are more prone than most to false results,
    as sometimes the code to check whether a test has worked is almost as
    complex as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
        # local_models should contain test-dependent model classes that
        # will be automatically removed from the app cache on test tear
        # down.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
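        # Add `field` via the schema editor, then read the column straight
        # from the database to verify the default applied to existing
        # rows; cast_function normalizes driver-specific return types.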
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
if cast_function and type(database_default) != type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
def get_column_collation(self, table, column):
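        """
        Get the collation of a column using a new cursor.
        """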
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def assertIndexOrder(self, table, index, order):
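        """
        Assert that `index` exists on `table` and that its column ordering
        matches `order` (e.g. ["ASC", "DESC"]).
        """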
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key, update data, and an index in the same
# transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
@isolate_apps("schema")
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = "schema"
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name("id")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(
new_field.remote_field.through, "tag_id", "schema_tag"
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(
connection
) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
"column": editor.quote_name(new_field.name),
}
self.assertFalse(
any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
)
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
)
columns = self.column_classes(Author)
self.assertEqual(
columns["age"][0],
connection.features.introspected_field_types["IntegerField"],
)
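        # Index 6 of the cursor description row is null_ok (PEP 249), so this
        # checks that the new column is nullable.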
self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
        Adding a field and removing it removes all deferred SQL referring to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["surname"][0],
connection.features.introspected_field_types["CharField"],
)
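        # On backends that interpret empty strings as NULLs (e.g. Oracle), the
        # column stays nullable.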
self.assertEqual(
columns["surname"][1][6],
connection.features.interprets_empty_strings_as_nulls,
)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # BooleanField is stored as TINYINT(1) on MySQL.
field_type = columns["awesome"][0]
self.assertEqual(
field_type, connection.features.introspected_field_types["BooleanField"]
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default it needs to transform (to the value's
        # length, an integer, in this case).
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns["thing"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # MySQL annoyingly uses the same column type for both, so
        # introspection returns one of these two field types.
self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b"123")
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns["bits"][0], "TextField")
@isolate_apps("schema")
def test_add_auto_field(self):
class AddAutoFieldModel(Model):
name = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(AddAutoFieldModel)
self.isolated_local_models = [AddAutoFieldModel]
old_field = AddAutoFieldModel._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
new_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.alter_field(AddAutoFieldModel, old_field, new_field)
new_auto_field = AutoField(primary_key=True)
new_auto_field.set_attributes_from_name("id")
        new_auto_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.add_field(AddAutoFieldModel, new_auto_field)
# Crashes on PostgreSQL when the GENERATED BY suffix is missing.
AddAutoFieldModel.objects.create(name="test")
def test_remove_field(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with CaptureQueriesContext(connection) as ctx:
editor.remove_field(Author, Author._meta.get_field("name"))
columns = self.column_classes(Author)
self.assertNotIn("name", columns)
if getattr(connection.features, "can_alter_table_drop_column", True):
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertTrue(columns["name"][1][6])
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Now that ID is an IntegerField, the database raises an error if it
# isn't provided.
if not connection.features.supports_unspecified_pk:
with self.assertRaises(DatabaseError):
Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_auto_field_quoted_db_column(self):
class Foo(Model):
id = AutoField(primary_key=True, db_column='"quoted_id"')
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.db_column = '"quoted_id"'
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field("uuid")
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name("uuid")
new_field.model = Author
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field("id"))
editor.alter_field(Author, old_field, new_field, strict=True)
# Redundant unique constraint is not added.
count = self.get_constraints_count(
Author._meta.db_table,
Author._meta.get_field("uuid").column,
None,
)
self.assertLessEqual(count["uniques"], 1)
@isolate_apps("schema")
def test_alter_primary_key_quoted_db_table(self):
class Foo(Model):
class Meta:
app_label = "schema"
db_table = '"foo"'
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_not_null_with_default_value(self):
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("address")
new_field = TextField(blank=True, default="", null=False)
new_field.set_attributes_from_name("address")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("parent")
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name("parent")
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_null_with_default_value_deferred_constraints(self):
class Publisher(Model):
class Meta:
app_label = "schema"
class Article(Model):
publisher = ForeignKey(Publisher, CASCADE)
title = CharField(max_length=50, null=True)
description = CharField(max_length=100, null=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Publisher)
editor.create_model(Article)
self.isolated_local_models = [Article, Publisher]
publisher = Publisher.objects.create()
Article.objects.create(publisher=publisher)
old_title = Article._meta.get_field("title")
new_title = CharField(max_length=50, null=False, default="")
new_title.set_attributes_from_name("title")
old_description = Article._meta.get_field("description")
new_description = CharField(max_length=100, null=False, default="")
new_description.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.alter_field(Article, old_title, new_title, strict=True)
editor.alter_field(Article, old_description, new_description, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05")
old_field = Note._meta.get_field("info")
new_field = DateField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05 3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = TimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_not_null_to_null(self):
"""
Nullability for textual fields is preserved on databases that
interpret empty strings as NULLs.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
columns = self.column_classes(Author)
# Field is nullable.
self.assertTrue(columns["uuid"][1][6])
# Change to NOT NULL.
old_field = Author._meta.get_field("uuid")
new_field = SlugField(null=False, blank=True)
new_field.set_attributes_from_name("uuid")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
# Nullability is preserved.
self.assertTrue(columns["uuid"][1][6])
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="aaa")
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="bbb")
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns["height"][1][6])
# Create some test data
Author.objects.create(name="Not null author", height=12)
Author.objects.create(name="Null author")
# Verify null value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertIsNone(Author.objects.get(name="Null author").height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertFalse(columns["height"][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertEqual(Author.objects.get(name="Null author").height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field("name")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_char_field_decrease_length(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
Author.objects.create(name="x" * 255)
# Change max_length of CharField.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
msg = "value too long for type character varying(254)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_with_custom_db_type(self):
from django.contrib.postgres.fields import ArrayField
class Foo(Model):
field = ArrayField(CharField(max_length=255))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("field")
new_field = ArrayField(CharField(max_length=16))
new_field.set_attributes_from_name("field")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=["x" * 16])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(CharField(max_length=15))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_nested_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(ArrayField(CharField(max_length=16)))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=[["x" * 16]])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(ArrayField(CharField(max_length=15)))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
from django.contrib.postgres.fields import ArrayField
ci_collation = "case_insensitive"
cs_collation = "en-x-icu"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16, db_collation=ci_collation))
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
ci_collation,
)
# Alter collation.
old_field = ArrayModel._meta.get_field("field")
new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
new_field_cs.set_attributes_from_name("field")
        new_field_cs.model = ArrayModel
with connection.schema_editor() as editor:
editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
cs_collation,
)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_unique_with_collation_charfield(self):
ci_collation = "case_insensitive"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class CiCharModel(Model):
field = CharField(max_length=16, db_collation=ci_collation, unique=True)
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(CiCharModel)
self.isolated_local_models = [CiCharModel]
self.assertEqual(
self.get_column_collation(CiCharModel._meta.db_table, "field"),
ci_collation,
)
self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field("info")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns["height"][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details["foreign_key"]:
self.fail(
"Found an unexpected FK constraint to %s" % details["columns"]
)
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique anymore
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
Book.objects.all().delete()
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique now
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for
# OneToOneField.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for
# ForeignKey.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name="Foo")
Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_autofield_pk_to_smallautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = SmallAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToAutoField(Model):
i = AutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToBigAutoField(Model):
i = BigAutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToBigAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
"""
        Should be able to alter a SmallIntegerField(primary_key=True) to
        SmallAutoField(primary_key=True).
"""
class SmallIntegerPK(Model):
i = SmallIntegerField(primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SmallIntegerPK)
self.isolated_local_models = [SmallIntegerPK]
old_field = SmallIntegerPK._meta.get_field("i")
new_field = SmallAutoField(primary_key=True)
new_field.model = SmallIntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field("i")
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
        # The model requires a new PK.
old_field = Transitional._meta.get_field("j")
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name("j")
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(
columns["display_name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field="name")
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["detail_info"][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass(
"TagM2MTest", related_name="books", through=LocalTagThrough
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(
columns["book_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertEqual(
columns["tag_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
class LocalAuthorNoteThrough(Model):
book = ForeignKey("schema.Author", CASCADE)
tag = ForeignKey("self", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalNoteWithM2MThrough(Model):
authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalAuthorNoteThrough)
editor.create_model(LocalNoteWithM2MThrough)
# Remove the through parameter.
old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
new_field = ManyToManyField("Author")
new_field.set_attributes_from_name("authors")
msg = (
f"Cannot alter field {old_field} into {new_field} - they are not "
f"compatible types (you cannot alter to or from M2M fields, or add or "
f"remove through= on M2M fields)"
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
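        # contribute_to_class() attaches the field to the model and sets up
        # the auto-created through model; no database changes happen yet.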
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
        # Make sure the model state stays coherent with the table now that
        # we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models["schema"][
new_field.remote_field.through._meta.model_name
]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(
LocalAuthorWithM2MThrough, old_field, new_field, strict=True
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
"tagm2mtest_id",
"schema_tagm2mtest",
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
        # Put the model into its new state so teardown cleans up correctly.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
)
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = "schema"
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = "schema"
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field("title")
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, "title1")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details["columns"] == ["height"] and details["check"]:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
class DurationModel(Model):
duration = DurationField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(DurationModel)
self.isolated_local_models = [DurationModel]
constraint_name = "duration_gte_5_minutes"
constraint = CheckConstraint(
check=Q(duration__gt=datetime.timedelta(minutes=5)),
name=constraint_name,
)
DurationModel._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(DurationModel, constraint)
constraints = self.get_constraints(DurationModel._meta.db_table)
self.assertIn(constraint_name, constraints)
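        # atomic() confines the expected failure so the test's surrounding
        # transaction isn't left in an aborted state (notably on PostgreSQL).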
with self.assertRaises(IntegrityError), atomic():
DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the custom check constraint
constraint = CheckConstraint(
check=Q(height__gte=0), name="author_height_gte_0_check"
)
custom_constraint_name = constraint.name
Author._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field check
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field check
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the check constraint
with connection.schema_editor() as editor:
Author._meta.constraints = []
editor.remove_constraint(Author, constraint)
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
TagUniqueRename._meta.db_table = "unique-table"
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
finally:
with connection.schema_editor() as editor:
editor.delete_model(TagUniqueRename)
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
"""
If AlterField isn't selective about dropping foreign key constraints
when modifying a field with a unique constraint, the AlterField
incorrectly drops and recreates the Book.author foreign key even though
it doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name("name")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
class Book(Model):
tags = ManyToManyField(Tag, related_name="books")
class Meta:
app_label = "schema"
        self.isolated_local_models = [
            Book._meta.get_field("tags").remote_field.through
        ]
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name("slug")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueName)
self.local_models = [AuthorWithUniqueName]
# Add the custom unique constraint
constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
custom_constraint_name = constraint.name
AuthorWithUniqueName._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueName, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field uniqueness
old_field = AuthorWithUniqueName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field uniqueness
new_field2 = AuthorWithUniqueName._meta.get_field("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueName._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueName, constraint)
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, UniqueTest._meta.unique_together, []
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, [], UniqueTest._meta.unique_together
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
editor.add_field(BookWithoutAuthor, new_field)
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueNameAndBirthday)
self.local_models = [AuthorWithUniqueNameAndBirthday]
# Add the custom unique constraint
constraint = UniqueConstraint(
fields=["name", "birthday"], name="author_name_birthday_uniq"
)
custom_constraint_name = constraint.name
AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Remove unique together
unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, unique_together, []
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add unique together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, [], unique_together
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueNameAndBirthday._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(fields=["name"], name="name_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
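        # create_sql() returns a Statement whose references_table() and
        # references_column() helpers inspect the generated DDL without
        # executing it.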
self.assertIs(sql.references_table(table), True)
self.assertIs(sql.references_column(table, "name"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC"])
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Upper("title"),
Lower("slug"),
name="func_upper_lower_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains database functions.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
sql = str(sql)
self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_unique_constraint_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("height").desc(),
"uuid",
Lower("name").asc(),
name="func_f_lower_field_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
constraints = self.get_constraints(table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "uuid"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
def test_func_unique_constraint_partial(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_cond_weight_uq",
condition=Q(weight__isnull=False),
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("weight"),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
def test_func_unique_constraint_covering(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_covering_uq",
include=["weight", "height"],
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(
constraints[constraint.name]["columns"],
[None, "weight", "height"],
)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
self.assertIs(sql.references_column(table, "height"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"INCLUDE (%s, %s)"
% (
editor.quote_name("weight"),
editor.quote_name("height"),
),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
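            # register_lookup() temporarily registers Lower/Abs so the
            # "__lower" and "__abs" references resolve as transforms.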
constraint = UniqueConstraint(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the year/slug columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [["author", "title"]], [])
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
class TagIndexed(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
index_together = [["slug", "title"]]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
self.isolated_local_models = [TagIndexed]
# Ensure there is an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c["columns"] == ["slug", "title"]
),
True,
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_remove_index_together_does_not_remove_meta_indexes(self):
class AuthorWithIndexedNameAndBirthday(Model):
name = CharField(max_length=255)
birthday = DateField()
class Meta:
app_label = "schema"
index_together = [["name", "birthday"]]
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedNameAndBirthday)
self.isolated_local_models = [AuthorWithIndexedNameAndBirthday]
# Add the custom index
index = Index(fields=["name", "birthday"], name="author_name_birthday_idx")
custom_index_name = index.name
AuthorWithIndexedNameAndBirthday._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedNameAndBirthday, index)
# Ensure the indexes exist
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Remove index together
index_together = AuthorWithIndexedNameAndBirthday._meta.index_together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, index_together, []
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add index together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, [], index_together
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the index
with connection.schema_editor() as editor:
AuthorWithIndexedNameAndBirthday._meta.indexes = []
editor.remove_index(AuthorWithIndexedNameAndBirthday, index)
@isolate_apps("schema")
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Alter the table
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=["name"], name="author_title_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
self.local_models = [AuthorWithIndexedName]
# Ensure the table has its index
self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=["-name"], name="author_name_idx")
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=("name",),
)
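        # db_index_name is the name Django auto-generates for the field's
        # db_index=True index.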
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithIndexedName, old_field, new_field, strict=True
)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Tests indexes with column ordering (ASC/DESC)
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
        # Add a unique column; verify that it creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
def _index_expressions_wrappers(self):
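        # Build the human-readable list of wrapper expressions (e.g. OrderBy,
        # Collate) that may only appear as the outermost part of an indexed
        # expression.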
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name").desc(), name="func_lower_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_f(self):
with connection.schema_editor() as editor:
editor.create_model(Tag)
index = Index("slug", F("title").desc(), name="func_f_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Tag, index)
sql = index.create_sql(Tag, editor)
table = Tag._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
# SQL contains columns.
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIs(sql.references_column(table, "title"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Tag, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
index = Index(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains database functions.
self.assertIs(sql.references_column(table, "name"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
index = Index(
F("author").desc(),
Lower("title").asc(),
"pub_date",
name="func_f_lower_field_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Book, index)
sql = index.create_sql(Book, editor)
table = Book._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
self.assertEqual(len(constraints[index.name]["columns"]), 3)
self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "author_id"), True)
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "pub_date"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@isolate_apps("schema")
def test_func_index_f_decimalfield(self):
class Node(Model):
value = DecimalField(max_digits=5, decimal_places=2)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
index = Index(F("value"), name="func_f_decimalfield_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Node, index)
sql = index.create_sql(Node, editor)
table = Node._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "value"), True)
# SQL doesn't contain casting.
self.assertNotIn("CAST", str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Node, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_cast(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Cast("weight", FloatField()), name="func_cast_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
        table = BookWithSlug._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
            editor.remove_index(BookWithSlug, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@skipIfDBFeature("collate_as_index_expression")
def test_func_index_collate_f_ordered(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(
Collate(F("name").desc(), collation=collation),
name="func_collate_f_desc_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_calc(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns and expressions.
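        # The str(sql).index(...) checks below also verify that the generated
        # SQL keeps the left-to-right order of "height" / ("weight" + 5).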
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "weight"), True)
sql = str(sql)
self.assertIs(
sql.index(editor.quote_name("height"))
< sql.index("/")
< sql.index(editor.quote_name("weight"))
< sql.index("+")
< sql.index("5"),
True,
)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index("field__some_key", name="func_json_key_idx")
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform_cast(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index(
Cast(KeyTextTransform("some_key", "field"), IntegerField()),
name="func_json_key_cast_idx",
)
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_index_unsupported(self):
# Index is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("name"), name="random_idx")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_index(Author, index))
self.assertIsNone(editor.remove_index(Author, index))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nonexistent_field(self):
index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Random(), name="func_random_idx")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_index(Author, index)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
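        # Drop the old AutoField primary key first so the table never carries
        # two primary keys while "slug" is promoted.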
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
"id",
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
def test_alter_primary_key_the_same_name(self):
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=2, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@skipIfDBFeature("can_rollback_ddl")
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(
editor.sql_create_table % {"table": "foo", "definition": ""}
)
@skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name(
"author_foreign_key_with_really_long_field_name_id"
)
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
        # Add a second FK; before the fix this would fail due to the long
        # reference name.
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
self.isolated_local_models = [Author]
if connection.vendor == "mysql":
self.assertForeignKeyExists(
Book, "author_id", '"table_author_double_quoted"'
)
else:
self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
        Tries creating a model's table, and then deleting it, when the table
        has an SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail(
"Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e
)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs["db_column"] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_index
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
"include": "",
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
constraint_name = "CamelCaseUniqConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(editor._create_unique_sql(model, [field], constraint_name))
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(
model,
get_field(Author, CASCADE, field_class=ForeignKey),
field,
strict=True,
)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
        # Add a new CharField whose default comes from effective_default().
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(
item[0],
None if connection.features.interprets_empty_strings_as_nulls else "",
)
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default="surname default")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], "surname default")
# And that the default is no longer set in the database.
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_add_field_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable CharField with a default.
new_field = CharField(max_length=15, blank=True, null=True, default="surname")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "surname"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_add_textfield_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable TextField with a default.
new_field = TextField(blank=True, null=True, default="text")
new_field.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT description FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "description"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field("height")
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_doesnt_perform_queries(self):
"""
        No queries are performed if a field's default changes and the field
        isn't changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_fk_attributes_noop(self):
"""
No queries are performed when changing field attributes that don't
affect the schema.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_field = Book._meta.get_field("author")
new_field = ForeignKey(
Author,
blank=True,
editable=False,
error_messages={"invalid": "error message"},
help_text="help text",
limit_choices_to={"limit": "choice"},
on_delete=PROTECT,
related_name="related_name",
related_query_name="related_query_name",
validators=[lambda x: x],
verbose_name="verbose name",
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, new_field, old_field, strict=True)
def test_alter_field_choices_noop(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(
choices=(("Jane", "Jane"), ("Joe", "Joe")),
max_length=255,
)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes: the btree index and an extra "*_like"
        # index with varchar_pattern_ops for LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851",
"schema_author_nom_de_plume_7570a851_like",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes: the unique constraint's index and an
        # extra "*_like" index with varchar_pattern_ops for LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851_like",
"schema_author_nom_de_plume_key",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field("info")
new_field = TextField(db_index=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, "info"),
["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"), []
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to add db_index=True
old_field = Tag._meta.get_field("slug")
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
self, mocked_datetime, mocked_tz
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(
month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc
)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True)
dt_tm_of_birth_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dt_tm_of_birth_auto_now_add,
"dtob_auto_now_add",
now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
        Foreign keys without a database-level constraint don't prevent the
        field they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
        Foreign keys without a database-level constraint don't prevent the
        table they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
import datetime
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
Avg,
Case,
Count,
F,
IntegerField,
Max,
Min,
OuterRef,
Q,
RowRange,
Subquery,
Sum,
Value,
ValueRange,
When,
Window,
WindowFrame,
)
from django.db.models.fields.json import KeyTextTransform, KeyTransform
from django.db.models.functions import (
Cast,
CumeDist,
DenseRank,
ExtractYear,
FirstValue,
Lag,
LastValue,
Lead,
NthValue,
Ntile,
PercentRank,
Rank,
RowNumber,
Upper,
)
from django.db.models.lookups import Exact
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import Classification, Detail, Employee, PastEmployeeDepartment
@skipUnlessDBFeature("supports_over_clause")
class WindowFunctionTests(TestCase):
@classmethod
def setUpTestData(cls):
classification = Classification.objects.create()
Employee.objects.bulk_create(
[
Employee(
name=e[0],
salary=e[1],
department=e[2],
hire_date=e[3],
age=e[4],
bonus=Decimal(e[1]) / 400,
classification=classification,
)
for e in [
("Jones", 45000, "Accounting", datetime.datetime(2005, 11, 1), 20),
(
"Williams",
37000,
"Accounting",
datetime.datetime(2009, 6, 1),
20,
),
("Jenson", 45000, "Accounting", datetime.datetime(2008, 4, 1), 20),
("Adams", 50000, "Accounting", datetime.datetime(2013, 7, 1), 50),
("Smith", 55000, "Sales", datetime.datetime(2007, 6, 1), 30),
("Brown", 53000, "Sales", datetime.datetime(2009, 9, 1), 30),
("Johnson", 40000, "Marketing", datetime.datetime(2012, 3, 1), 30),
("Smith", 38000, "Marketing", datetime.datetime(2009, 10, 1), 20),
("Wilkinson", 60000, "IT", datetime.datetime(2011, 3, 1), 40),
("Moore", 34000, "IT", datetime.datetime(2013, 8, 1), 40),
("Miller", 100000, "Management", datetime.datetime(2005, 6, 1), 40),
("Johnson", 80000, "Management", datetime.datetime(2005, 7, 1), 50),
]
]
)
employees = list(Employee.objects.order_by("pk"))
PastEmployeeDepartment.objects.bulk_create(
[
PastEmployeeDepartment(employee=employees[6], department="Sales"),
PastEmployeeDepartment(employee=employees[10], department="IT"),
]
)
def test_dense_rank(self):
tests = [
ExtractYear(F("hire_date")).asc(),
F("hire_date__year").asc(),
"hire_date__year",
]
for order_by in tests:
with self.subTest(order_by=order_by):
qs = Employee.objects.annotate(
rank=Window(expression=DenseRank(), order_by=order_by),
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 2),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 3),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 4),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 4),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 4),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 5),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 6),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 7),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 7),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_department_salary(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=[F("hire_date").asc()],
)
).order_by("department", "department_sum")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, 45000),
("Jenson", "Accounting", 45000, 90000),
("Williams", "Accounting", 37000, 127000),
("Adams", "Accounting", 50000, 177000),
("Wilkinson", "IT", 60000, 60000),
("Moore", "IT", 34000, 94000),
("Miller", "Management", 100000, 100000),
("Johnson", "Management", 80000, 180000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 78000),
("Smith", "Sales", 55000, 55000),
("Brown", "Sales", 53000, 108000),
],
lambda entry: (
entry.name,
entry.department,
entry.salary,
entry.department_sum,
),
)
def test_rank(self):
"""
        Rank the employees based on the year they were hired. Since multiple
        employees were hired in the same year, ties share a rank and the
        ranking contains gaps.
"""
qs = Employee.objects.annotate(
rank=Window(
expression=Rank(),
order_by=F("hire_date__year").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 4),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 5),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 6),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 6),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 6),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 9),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 10),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 11),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 11),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_row_number(self):
"""
        The row number window function computes the number based on the order
        in which the tuples were inserted. Some backends, such as Oracle,
        require an ordering clause in the Window expression, so one is given.
"""
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
order_by=F("pk").asc(),
)
).order_by("pk")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_row_number_no_ordering(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted.
"""
# Add a default ordering for consistent results across databases.
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
)
).order_by("pk")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_avg_salary_department(self):
qs = Employee.objects.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=F("department").asc(),
partition_by="department",
)
).order_by("department", "-salary", "name")
self.assertQuerysetEqual(
qs,
[
("Adams", 50000, "Accounting", 44250.00),
("Jenson", 45000, "Accounting", 44250.00),
("Jones", 45000, "Accounting", 44250.00),
("Williams", 37000, "Accounting", 44250.00),
("Wilkinson", 60000, "IT", 47000.00),
("Moore", 34000, "IT", 47000.00),
("Miller", 100000, "Management", 90000.00),
("Johnson", 80000, "Management", 90000.00),
("Johnson", 40000, "Marketing", 39000.00),
("Smith", 38000, "Marketing", 39000.00),
("Smith", 55000, "Sales", 54000.00),
("Brown", 53000, "Sales", 54000.00),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.avg_salary,
),
)
def test_lag(self):
"""
        Compute the salary one position below each employee's within their
        department, ordered by ascending salary (Lag with offset=1). The
        lowest-paid employee in each department gets None.
"""
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="salary", offset=1),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", F("salary").asc(), F("name").asc())
self.assertQuerysetEqual(
qs,
[
("Williams", 37000, "Accounting", None),
("Jenson", 45000, "Accounting", 37000),
("Jones", 45000, "Accounting", 45000),
("Adams", 50000, "Accounting", 45000),
("Moore", 34000, "IT", None),
("Wilkinson", 60000, "IT", 34000),
("Johnson", 80000, "Management", None),
("Miller", 100000, "Management", 80000),
("Smith", 38000, "Marketing", None),
("Johnson", 40000, "Marketing", 38000),
("Brown", 53000, "Sales", None),
("Smith", 55000, "Sales", 53000),
],
transform=lambda row: (row.name, row.salary, row.department, row.lag),
)
def test_lag_decimalfield(self):
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="bonus", offset=1),
partition_by=F("department"),
order_by=[F("bonus").asc(), F("name").asc()],
)
).order_by("department", F("bonus").asc(), F("name").asc())
self.assertQuerysetEqual(
qs,
[
("Williams", 92.5, "Accounting", None),
("Jenson", 112.5, "Accounting", 92.5),
("Jones", 112.5, "Accounting", 112.5),
("Adams", 125, "Accounting", 112.5),
("Moore", 85, "IT", None),
("Wilkinson", 150, "IT", 85),
("Johnson", 200, "Management", None),
("Miller", 250, "Management", 200),
("Smith", 95, "Marketing", None),
("Johnson", 100, "Marketing", 95),
("Brown", 132.5, "Sales", None),
("Smith", 137.5, "Sales", 132.5),
],
transform=lambda row: (row.name, row.bonus, row.department, row.lag),
)
def test_first_value(self):
qs = Employee.objects.annotate(
first_value=Window(
expression=FirstValue("salary"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
).order_by("department", "hire_date")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 45000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 45000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 60000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 38000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 55000),
],
lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.first_value,
),
)
def test_last_value(self):
qs = Employee.objects.annotate(
last_value=Window(
expression=LastValue("hire_date"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
(
"Adams",
"Accounting",
datetime.date(2013, 7, 1),
50000,
datetime.date(2013, 7, 1),
),
(
"Jenson",
"Accounting",
datetime.date(2008, 4, 1),
45000,
datetime.date(2008, 4, 1),
),
(
"Jones",
"Accounting",
datetime.date(2005, 11, 1),
45000,
datetime.date(2005, 11, 1),
),
(
"Williams",
"Accounting",
datetime.date(2009, 6, 1),
37000,
datetime.date(2009, 6, 1),
),
(
"Moore",
"IT",
datetime.date(2013, 8, 1),
34000,
datetime.date(2013, 8, 1),
),
(
"Wilkinson",
"IT",
datetime.date(2011, 3, 1),
60000,
datetime.date(2011, 3, 1),
),
(
"Miller",
"Management",
datetime.date(2005, 6, 1),
100000,
datetime.date(2005, 6, 1),
),
(
"Johnson",
"Management",
datetime.date(2005, 7, 1),
80000,
datetime.date(2005, 7, 1),
),
(
"Johnson",
"Marketing",
datetime.date(2012, 3, 1),
40000,
datetime.date(2012, 3, 1),
),
(
"Smith",
"Marketing",
datetime.date(2009, 10, 1),
38000,
datetime.date(2009, 10, 1),
),
(
"Brown",
"Sales",
datetime.date(2009, 9, 1),
53000,
datetime.date(2009, 9, 1),
),
(
"Smith",
"Sales",
datetime.date(2007, 6, 1),
55000,
datetime.date(2007, 6, 1),
),
],
transform=lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.last_value,
),
ordered=False,
)
def test_function_list_of_values(self):
qs = (
Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
)
.values_list("name", "salary", "department", "hire_date", "lead")
.order_by("department", F("hire_date").asc(), F("name").desc())
)
self.assertNotIn("GROUP BY", str(qs.query))
self.assertSequenceEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
)
def test_min_department(self):
"""An alternative way to specify a query for FirstValue."""
qs = Employee.objects.annotate(
min_salary=Window(
expression=Min("salary"),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", "salary", "name")
self.assertQuerysetEqual(
qs,
[
("Williams", "Accounting", 37000, 37000),
("Jenson", "Accounting", 45000, 37000),
("Jones", "Accounting", 45000, 37000),
("Adams", "Accounting", 50000, 37000),
("Moore", "IT", 34000, 34000),
("Wilkinson", "IT", 60000, 34000),
("Johnson", "Management", 80000, 80000),
("Miller", "Management", 100000, 80000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 38000),
("Brown", "Sales", 53000, 53000),
("Smith", "Sales", 55000, 53000),
],
lambda row: (row.name, row.department, row.salary, row.min_salary),
)
def test_max_per_year(self):
"""
Find the maximum salary awarded in the same year as the
employee was hired, regardless of the department.
"""
qs = Employee.objects.annotate(
max_salary_year=Window(
expression=Max("salary"),
order_by=ExtractYear("hire_date").asc(),
partition_by=ExtractYear("hire_date"),
)
).order_by(ExtractYear("hire_date"), "salary")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, 2005, 100000),
("Johnson", "Management", 80000, 2005, 100000),
("Miller", "Management", 100000, 2005, 100000),
("Smith", "Sales", 55000, 2007, 55000),
("Jenson", "Accounting", 45000, 2008, 45000),
("Williams", "Accounting", 37000, 2009, 53000),
("Smith", "Marketing", 38000, 2009, 53000),
("Brown", "Sales", 53000, 2009, 53000),
("Wilkinson", "IT", 60000, 2011, 60000),
("Johnson", "Marketing", 40000, 2012, 40000),
("Moore", "IT", 34000, 2013, 50000),
("Adams", "Accounting", 50000, 2013, 50000),
],
lambda row: (
row.name,
row.department,
row.salary,
row.hire_date.year,
row.max_salary_year,
),
)
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(
cume_dist=Window(
expression=CumeDist(),
order_by=F("salary").asc(),
)
).order_by("salary", "name")
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerysetEqual(
qs,
[
("Moore", "IT", 34000, 0.0833333333),
("Williams", "Accounting", 37000, 0.1666666667),
("Smith", "Marketing", 38000, 0.25),
("Johnson", "Marketing", 40000, 0.3333333333),
("Jenson", "Accounting", 45000, 0.5),
("Jones", "Accounting", 45000, 0.5),
("Adams", "Accounting", 50000, 0.5833333333),
("Brown", "Sales", 53000, 0.6666666667),
("Smith", "Sales", 55000, 0.75),
("Wilkinson", "IT", 60000, 0.8333333333),
("Johnson", "Management", 80000, 0.9166666667),
("Miller", "Management", 100000, 1),
],
lambda row: (
row.name,
row.department,
row.salary,
round(row.cume_dist, 10),
),
)
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue(expression="salary", nth=2),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by=F("department"),
)
).order_by("department", "hire_date", "name")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", datetime.date(2005, 11, 1), 45000, None),
("Jenson", "Accounting", datetime.date(2008, 4, 1), 45000, 45000),
("Williams", "Accounting", datetime.date(2009, 6, 1), 37000, 45000),
("Adams", "Accounting", datetime.date(2013, 7, 1), 50000, 45000),
("Wilkinson", "IT", datetime.date(2011, 3, 1), 60000, None),
("Moore", "IT", datetime.date(2013, 8, 1), 34000, 34000),
("Miller", "Management", datetime.date(2005, 6, 1), 100000, None),
("Johnson", "Management", datetime.date(2005, 7, 1), 80000, 80000),
("Smith", "Marketing", datetime.date(2009, 10, 1), 38000, None),
("Johnson", "Marketing", datetime.date(2012, 3, 1), 40000, 40000),
("Smith", "Sales", datetime.date(2007, 6, 1), 55000, None),
("Brown", "Sales", datetime.date(2009, 9, 1), 53000, 53000),
],
lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.nth_value,
),
)
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL should be
returned.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
).order_by("department", F("hire_date").asc(), F("name").desc())
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
)
def test_lead_offset(self):
"""
        Determine what the person hired two positions later in the same
        department makes. No default is provided, so None/NULL is returned
        for the last two rows of each partition.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead("salary", offset=2),
partition_by="department",
order_by=F("hire_date").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 37000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 50000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), None),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), None),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), None),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), None),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
ordered=False,
)
@skipUnlessDBFeature("supports_default_in_lead_lag")
def test_lead_default(self):
qs = Employee.objects.annotate(
lead_default=Window(
expression=Lead(expression="salary", offset=5, default=60000),
partition_by=F("department"),
order_by=F("department").asc(),
)
)
self.assertEqual(
list(qs.values_list("lead_default", flat=True).distinct()), [60000]
)
def test_ntile(self):
"""
        Compute the group for each employee across the entire company, based
        on salary, highest first. There are twelve employees, so they divide
        evenly into four groups of three.
"""
qs = Employee.objects.annotate(
ntile=Window(
expression=Ntile(num_buckets=4),
order_by="-salary",
)
).order_by("ntile", "-salary", "name")
self.assertQuerysetEqual(
qs,
[
("Miller", "Management", 100000, 1),
("Johnson", "Management", 80000, 1),
("Wilkinson", "IT", 60000, 1),
("Smith", "Sales", 55000, 2),
("Brown", "Sales", 53000, 2),
("Adams", "Accounting", 50000, 2),
("Jenson", "Accounting", 45000, 3),
("Jones", "Accounting", 45000, 3),
("Johnson", "Marketing", 40000, 3),
("Smith", "Marketing", 38000, 4),
("Williams", "Accounting", 37000, 4),
("Moore", "IT", 34000, 4),
],
lambda x: (x.name, x.department, x.salary, x.ntile),
)
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(
percent_rank=Window(
expression=PercentRank(),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("percent_rank")
# Round to account for precision differences among databases.
self.assertQuerysetEqual(
qs,
[
("Moore", "IT", 34000, 0.0),
("Williams", "Accounting", 37000, 0.0909090909),
("Smith", "Marketing", 38000, 0.1818181818),
("Johnson", "Marketing", 40000, 0.2727272727),
("Jenson", "Accounting", 45000, 0.3636363636),
("Jones", "Accounting", 45000, 0.4545454545),
("Adams", "Accounting", 50000, 0.5454545455),
("Brown", "Sales", 53000, 0.6363636364),
("Smith", "Sales", 55000, 0.7272727273),
("Wilkinson", "IT", 60000, 0.8181818182),
("Johnson", "Management", 80000, 0.9090909091),
("Miller", "Management", 100000, 1.0),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
round(row.percent_rank, 10),
),
)
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue("salary", nth=20), order_by=F("salary").asc()
)
)
self.assertEqual(
list(qs.values_list("nth_value", flat=True).distinct()), [None]
)
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(
max=Window(
expression=Max("salary"),
partition_by=[F("department"), F("hire_date__year")],
)
).order_by("department", "hire_date", "name")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.max,
),
)
def test_multiple_ordering(self):
"""
Accumulate the salaries over the departments based on hire_date.
        If two people were hired on the same date in the same department, the
        extra ordering on name breaks the tie, giving each a distinct running
        total.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="department",
order_by=[F("hire_date").asc(), F("name").asc()],
)
).order_by("department", "sum")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 127000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 177000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 94000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 180000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 78000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 108000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
def test_related_ordering_with_count(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=["classification__code"],
)
)
self.assertEqual(qs.count(), 12)
def test_filter(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerysetEqual(
qs.filter(department_salary_rank=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerysetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerysetEqual(
qs.filter(department_salary_rank=1, department_avg_age_diff__gt=0),
["Miller"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerysetEqual(
qs.filter(Q(department_salary_rank=1) | Q(department_avg_age_diff__gt=0)),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_filter_conditional_annotation(self):
qs = (
Employee.objects.annotate(
rank=Window(Rank(), partition_by="department", order_by="-salary"),
case_first_rank=Case(
When(rank=1, then=True),
default=False,
),
q_first_rank=Q(rank=1),
)
.order_by("name")
.values_list("name", flat=True)
)
for annotation in ["case_first_rank", "q_first_rank"]:
with self.subTest(annotation=annotation):
self.assertSequenceEqual(
qs.filter(**{annotation: True}),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_filter_conditional_expression(self):
qs = (
Employee.objects.filter(
Exact(Window(Rank(), partition_by="department", order_by="-salary"), 1)
)
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_column_ref_rhs(self):
qs = (
Employee.objects.annotate(
max_dept_salary=Window(Max("salary"), partition_by="department")
)
.filter(max_dept_salary=F("salary"))
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_values(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("department", "name")
.values_list(Upper("name"), flat=True)
)
self.assertSequenceEqual(
qs.filter(department_salary_rank=1),
["ADAMS", "WILKINSON", "MILLER", "JOHNSON", "SMITH"],
)
def test_filter_alias(self):
qs = Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
self.assertQuerysetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
def test_filter_select_related(self):
qs = (
Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
)
.select_related("classification")
.filter(department_avg_age_diff__gt=0)
.order_by("department", "name")
)
self.assertQuerysetEqual(
qs,
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
with self.assertNumQueries(0):
qs[0].classification
def test_exclude(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerysetEqual(
qs.exclude(department_salary_rank__gt=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerysetEqual(
qs.exclude(department_avg_age_diff__lte=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerysetEqual(
qs.exclude(
Q(department_salary_rank__gt=1) | Q(department_avg_age_diff__lte=0)
),
["Miller"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerysetEqual(
qs.exclude(department_salary_rank__gt=1, department_avg_age_diff__lte=0),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_heterogeneous_filter(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("name")
.values_list("name", flat=True)
)
# Heterogeneous filter between window function and aggregates pushes
# the WHERE clause to the QUALIFY outer query.
self.assertSequenceEqual(
qs.filter(
department_salary_rank=1, department__in=["Accounting", "Management"]
),
["Adams", "Miller"],
)
self.assertSequenceEqual(
qs.filter(
Q(department_salary_rank=1)
| Q(department__in=["Accounting", "Management"])
),
[
"Adams",
"Jenson",
"Johnson",
"Johnson",
"Jones",
"Miller",
"Smith",
"Wilkinson",
"Williams",
],
)
# Heterogeneous filter between window function and aggregates pushes
# the HAVING clause to the QUALIFY outer query.
qs = qs.annotate(past_department_count=Count("past_departments"))
self.assertSequenceEqual(
qs.filter(department_salary_rank=1, past_department_count__gte=1),
["Johnson", "Miller"],
)
self.assertSequenceEqual(
qs.filter(Q(department_salary_rank=1) | Q(past_department_count__gte=1)),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_limited_filter(self):
"""
        A query filtering against a window function has its limit applied
after window filtering takes place.
"""
self.assertQuerysetEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.order_by("department")[0:3],
["Adams", "Wilkinson", "Miller"],
lambda employee: employee.name,
)
def test_filter_count(self):
self.assertEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.count(),
5,
)
@skipUnlessDBFeature("supports_frame_range_fixed_distance")
def test_range_n_preceding_and_following(self):
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=F("salary").asc(),
partition_by="department",
frame=ValueRange(start=-2, end=2),
)
)
self.assertIn("RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING", str(qs.query))
self.assertQuerysetEqual(
qs,
[
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 90000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 80000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
ordered=False,
)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="age",
order_by=[F("age").asc()],
frame=ValueRange(start=None, end=None),
)
).order_by("department", "hire_date", "name")
self.assertIn(
"RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", str(qs.query)
)
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, datetime.date(2005, 11, 1), 165000),
("Jenson", "Accounting", 45000, datetime.date(2008, 4, 1), 165000),
("Williams", "Accounting", 37000, datetime.date(2009, 6, 1), 165000),
("Adams", "Accounting", 50000, datetime.date(2013, 7, 1), 130000),
("Wilkinson", "IT", 60000, datetime.date(2011, 3, 1), 194000),
("Moore", "IT", 34000, datetime.date(2013, 8, 1), 194000),
("Miller", "Management", 100000, datetime.date(2005, 6, 1), 194000),
("Johnson", "Management", 80000, datetime.date(2005, 7, 1), 130000),
("Smith", "Marketing", 38000, datetime.date(2009, 10, 1), 165000),
("Johnson", "Marketing", 40000, datetime.date(2012, 3, 1), 148000),
("Smith", "Sales", 55000, datetime.date(2007, 6, 1), 148000),
("Brown", "Sales", 53000, datetime.date(2009, 9, 1), 148000),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
row.hire_date,
row.sum,
),
)
def test_subquery_row_range_rank(self):
qs = Employee.objects.annotate(
highest_avg_salary_date=Subquery(
Employee.objects.filter(
department=OuterRef("department"),
)
.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc()],
frame=RowRange(start=-1, end=1),
),
)
.order_by("-avg_salary", "hire_date")
.values("hire_date")[:1],
),
).order_by("department", "name")
self.assertQuerysetEqual(
qs,
[
("Adams", "Accounting", datetime.date(2005, 11, 1)),
("Jenson", "Accounting", datetime.date(2005, 11, 1)),
("Jones", "Accounting", datetime.date(2005, 11, 1)),
("Williams", "Accounting", datetime.date(2005, 11, 1)),
("Moore", "IT", datetime.date(2011, 3, 1)),
("Wilkinson", "IT", datetime.date(2011, 3, 1)),
("Johnson", "Management", datetime.date(2005, 6, 1)),
("Miller", "Management", datetime.date(2005, 6, 1)),
("Johnson", "Marketing", datetime.date(2009, 10, 1)),
("Smith", "Marketing", datetime.date(2009, 10, 1)),
("Brown", "Sales", datetime.date(2007, 6, 1)),
("Smith", "Sales", datetime.date(2007, 6, 1)),
],
transform=lambda row: (
row.name,
row.department,
row.highest_avg_salary_date,
),
)
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum is the sum of the three next (if they exist) and all
previous rows according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=None, end=3),
)
).order_by("sum", "hire_date")
self.assertIn("ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING", str(qs.query))
self.assertQuerysetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 280000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 325000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 362000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 415000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 453000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 513000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 553000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 603000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 637000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 637000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 637000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 637000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_window_function(self):
"""
Window functions are not aggregates, and hence a query to filter out
duplicates may be useful.
"""
qs = (
Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by=ExtractYear("hire_date"),
order_by=ExtractYear("hire_date"),
),
year=ExtractYear("hire_date"),
)
.filter(sum__gte=45000)
.values("year", "sum")
.distinct("year")
.order_by("year")
)
results = [
{"year": 2005, "sum": 225000},
{"year": 2007, "sum": 55000},
{"year": 2008, "sum": 45000},
{"year": 2009, "sum": 128000},
{"year": 2011, "sum": 60000},
{"year": 2013, "sum": 84000},
]
        for idx, val in enumerate(results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Max(Col(expressions_window_employee, expressions_window.Employee.salary)) "
"OVER (PARTITION BY Col(expressions_window_employee, "
"expressions_window.Employee.department))>)."
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department="Management").update(
salary=Window(expression=Max("salary"), partition_by="department"),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Sum(Value(10000), order_by=OrderBy(F(pk), descending=False)) OVER ()"
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name="Jameson",
department="Management",
hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F("pk").asc())),
)
def test_window_expression_within_subquery(self):
subquery_qs = Employee.objects.annotate(
highest=Window(
FirstValue("id"),
partition_by=F("department"),
order_by=F("salary").desc(),
)
).values("highest")
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertCountEqual(
highest_salary.values("department", "salary"),
[
{"department": "Accounting", "salary": 50000},
{"department": "Sales", "salary": 55000},
{"department": "Marketing", "salary": 40000},
{"department": "IT", "salary": 60000},
{"department": "Management", "salary": 100000},
],
)
@skipUnlessDBFeature("supports_json_field")
def test_key_transform(self):
Detail.objects.bulk_create(
[
Detail(value={"department": "IT", "name": "Smith", "salary": 37000}),
Detail(value={"department": "IT", "name": "Nowak", "salary": 32000}),
Detail(value={"department": "HR", "name": "Brown", "salary": 50000}),
Detail(value={"department": "HR", "name": "Smith", "salary": 55000}),
Detail(value={"department": "PR", "name": "Moore", "salary": 90000}),
]
)
tests = [
(KeyTransform("department", "value"), KeyTransform("name", "value")),
(F("value__department"), F("value__name")),
]
for partition_by, order_by in tests:
with self.subTest(partition_by=partition_by, order_by=order_by):
qs = Detail.objects.annotate(
department_sum=Window(
expression=Sum(
Cast(
KeyTextTransform("salary", "value"),
output_field=IntegerField(),
)
),
partition_by=[partition_by],
order_by=[order_by],
)
).order_by("value__department", "department_sum")
self.assertQuerysetEqual(
qs,
[
("Brown", "HR", 50000, 50000),
("Smith", "HR", 55000, 105000),
("Nowak", "IT", 32000, 32000),
("Smith", "IT", 37000, 69000),
("Moore", "PR", 90000, 90000),
],
lambda entry: (
entry.value["name"],
entry.value["department"],
entry.value["salary"],
entry.department_sum,
),
)
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=3),
)
)
)
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=-3),
)
)
)
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end="a"),
)
)
)
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=ValueRange(start="a"),
)
)
)
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=RowRange(end="a"),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_start(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=-1),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_end(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=1),
)
)
)
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=RowRange(start="a"),
)
)
)
class WindowUnsupportedTests(TestCase):
def test_unsupported_backend(self):
msg = "This backend does not support window expressions."
with mock.patch.object(connection.features, "supports_over_clause", False):
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(
dense_rank=Window(expression=DenseRank())
).get()
class NonQueryWindowTests(SimpleTestCase):
def test_window_repr(self):
self.assertEqual(
repr(Window(expression=Sum("salary"), partition_by="department")),
"<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>",
)
self.assertEqual(
repr(Window(expression=Avg("salary"), order_by=F("department").asc())),
"<Window: Avg(F(salary)) OVER (OrderByList(OrderBy(F(department), "
"descending=False)))>",
)
def test_window_frame_repr(self):
self.assertEqual(
repr(RowRange(start=-1)),
"<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=None, end=1)),
"<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=0, end=0)),
"<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>",
)
self.assertEqual(
repr(RowRange(start=0, end=0)),
"<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>",
)
def test_empty_group_by_cols(self):
window = Window(expression=Sum("pk"))
self.assertEqual(window.get_group_by_cols(), [])
self.assertFalse(window.contains_aggregate)
def test_frame_empty_group_by_cols(self):
frame = WindowFrame()
self.assertEqual(frame.get_group_by_cols(), [])
def test_frame_window_frame_notimplemented(self):
frame = WindowFrame()
msg = "Subclasses must implement window_frame_start_end()."
with self.assertRaisesMessage(NotImplementedError, msg):
frame.window_frame_start_end(None, None, None)
def test_invalid_filter(self):
msg = (
"Heterogeneous disjunctive predicates against window functions are not "
"implemented when performing conditional aggregation."
)
qs = Employee.objects.annotate(
window=Window(Rank()),
past_dept_cnt=Count("past_departments"),
)
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.filter(Q(window=1) | Q(department="Accounting")))
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.exclude(window=1, department="Accounting"))
def test_invalid_order_by(self):
msg = (
"Window.order_by must be either a string reference to a field, an "
"expression, or a list or tuple of them."
)
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Sum("power"), order_by={"-horse"})
def test_invalid_source_expression(self):
msg = "Expression 'Upper' isn't compatible with OVER clauses."
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Upper("name"))
|
21d67ba64a447062eb39245b8c1492c4be0b3a29e2646291c9143efcc69ea3fc | import json
from django import forms
from django.core import checks, exceptions
from django.db import NotSupportedError, connections, router
from django.db.models import lookups
from django.db.models.fields import TextField
from django.db.models.lookups import PostgresOperatorLookup, Transform
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import CheckFieldDefaultMixin
__all__ = ["JSONField"]
class JSONField(CheckFieldDefaultMixin, Field):
empty_strings_allowed = False
description = _("A JSON object")
default_error_messages = {
"invalid": _("Value must be valid JSON."),
}
_default_hint = ("dict", "{}")
def __init__(
self,
verbose_name=None,
name=None,
encoder=None,
decoder=None,
**kwargs,
):
if encoder and not callable(encoder):
raise ValueError("The encoder parameter must be a callable object.")
if decoder and not callable(decoder):
raise ValueError("The decoder parameter must be a callable object.")
self.encoder = encoder
self.decoder = decoder
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
databases = kwargs.get("databases") or []
errors.extend(self._check_supported(databases))
return errors
def _check_supported(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not (
"supports_json_field" in self.model._meta.required_db_features
or connection.features.supports_json_field
):
errors.append(
checks.Error(
"%s does not support JSONFields." % connection.display_name,
obj=self.model,
id="fields.E180",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.encoder is not None:
kwargs["encoder"] = self.encoder
if self.decoder is not None:
kwargs["decoder"] = self.decoder
return name, path, args, kwargs
def from_db_value(self, value, expression, connection):
if value is None:
return value
# Some backends (SQLite at least) extract non-string values in their
# SQL datatypes.
if isinstance(expression, KeyTransform) and not isinstance(value, str):
return value
try:
return json.loads(value, cls=self.decoder)
except json.JSONDecodeError:
return value
def get_internal_type(self):
return "JSONField"
def get_prep_value(self, value):
if value is None:
return value
return json.dumps(value, cls=self.encoder)
def get_transform(self, name):
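        # Any lookup name that isn't a registered transform is treated as a
        # JSON key, which is what enables arbitrary data__key__subkey chains.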
transform = super().get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super().validate(value, model_instance)
try:
json.dumps(value, cls=self.encoder)
except TypeError:
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def value_to_string(self, obj):
return self.value_from_object(obj)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.JSONField,
"encoder": self.encoder,
"decoder": self.decoder,
**kwargs,
}
)
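# A minimal usage sketch (the ``Dog`` model is hypothetical, for illustration
# only): values round-trip through the configured encoder/decoder and key
# lookups resolve to the transforms defined below.
#
#     class Dog(models.Model):
#         data = models.JSONField(null=True)
#
#     Dog.objects.create(data={"breed": "collie", "owner": {"name": "Bob"}})
#     Dog.objects.filter(data__breed="collie")      # KeyTransform + exact
#     Dog.objects.filter(data__owner__name="Bob")   # nested KeyTransform
#     Dog.objects.filter(data__has_key="owner")     # HasKey lookup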
def compile_json_path(key_transforms, include_root=True):
path = ["$"] if include_root else []
for key_transform in key_transforms:
try:
num = int(key_transform)
except ValueError: # non-integer
path.append(".")
path.append(json.dumps(key_transform))
else:
path.append("[%s]" % num)
return "".join(path)
class DataContains(PostgresOperatorLookup):
lookup_name = "contains"
postgres_operator = "@>"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contains lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params
class ContainedBy(PostgresOperatorLookup):
lookup_name = "contained_by"
postgres_operator = "<@"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contained_by lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(rhs_params) + tuple(lhs_params)
return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params
class HasKeyLookup(PostgresOperatorLookup):
logical_operator = None
def compile_json_path_final_key(self, key_transform):
# Compile the final key without interpreting ints as array elements.
return ".%s" % json.dumps(key_transform)
def as_sql(self, compiler, connection, template=None):
# Process JSON path from the left-hand side.
if isinstance(self.lhs, KeyTransform):
lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs(
compiler, connection
)
lhs_json_path = compile_json_path(lhs_key_transforms)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
lhs_json_path = "$"
sql = template % lhs
# Process JSON path from the right-hand side.
rhs = self.rhs
rhs_params = []
if not isinstance(rhs, (list, tuple)):
rhs = [rhs]
for key in rhs:
if isinstance(key, KeyTransform):
*_, rhs_key_transforms = key.preprocess_lhs(compiler, connection)
else:
rhs_key_transforms = [key]
*rhs_key_transforms, final_key = rhs_key_transforms
rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False)
rhs_json_path += self.compile_json_path_final_key(final_key)
rhs_params.append(lhs_json_path + rhs_json_path)
# Add condition for each key.
if self.logical_operator:
sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params))
return sql, tuple(lhs_params) + tuple(rhs_params)
def as_mysql(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)"
)
def as_oracle(self, compiler, connection):
sql, params = self.as_sql(
compiler, connection, template="JSON_EXISTS(%s, '%%s')"
)
# Add paths directly into SQL because path expressions cannot be passed
# as bind variables on Oracle.
return sql % tuple(params), []
def as_postgresql(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
*_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection)
for key in rhs_key_transforms[:-1]:
self.lhs = KeyTransform(key, self.lhs)
self.rhs = rhs_key_transforms[-1]
return super().as_postgresql(compiler, connection)
def as_sqlite(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL"
)
class HasKey(HasKeyLookup):
lookup_name = "has_key"
postgres_operator = "?"
prepare_rhs = False
class HasKeys(HasKeyLookup):
lookup_name = "has_keys"
postgres_operator = "?&"
logical_operator = " AND "
def get_prep_lookup(self):
return [str(item) for item in self.rhs]
class HasAnyKeys(HasKeys):
lookup_name = "has_any_keys"
postgres_operator = "?|"
logical_operator = " OR "
class HasKeyOrArrayIndex(HasKey):
def compile_json_path_final_key(self, key_transform):
return compile_json_path([key_transform], include_root=False)
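# Unlike HasKey, an integer-like final key compiles to an array index here
# (e.g. "1" becomes "[1]"); the isnull and exact key lookups below rely on
# this behavior on Oracle and SQLite.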
class CaseInsensitiveMixin:
"""
Mixin to allow case-insensitive comparison of JSON values on MySQL.
MySQL handles strings used in JSON context using the utf8mb4_bin collation.
Because utf8mb4_bin is a binary collation, comparison of JSON values is
case-sensitive.
"""
def process_lhs(self, compiler, connection):
lhs, lhs_params = super().process_lhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % lhs, lhs_params
return lhs, lhs_params
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % rhs, rhs_params
return rhs, rhs_params
class JSONExact(lookups.Exact):
can_use_none_as_rhs = True
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
# Treat None lookup values as null.
if rhs == "%s" and rhs_params == [None]:
rhs_params = ["null"]
if connection.vendor == "mysql":
func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params)
rhs = rhs % tuple(func)
return rhs, rhs_params
class JSONIContains(CaseInsensitiveMixin, lookups.IContains):
pass
JSONField.register_lookup(DataContains)
JSONField.register_lookup(ContainedBy)
JSONField.register_lookup(HasKey)
JSONField.register_lookup(HasKeys)
JSONField.register_lookup(HasAnyKeys)
JSONField.register_lookup(JSONExact)
JSONField.register_lookup(JSONIContains)
class KeyTransform(Transform):
postgres_operator = "->"
postgres_nested_operator = "#>"
def __init__(self, key_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key_name = str(key_name)
def preprocess_lhs(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if connection.vendor == "oracle":
# Escape string-formatting.
key_transforms = [key.replace("%", "%%") for key in key_transforms]
return lhs, params, key_transforms
def as_mysql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,)
def as_oracle(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return (
"COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))"
% ((lhs, json_path) * 2)
), tuple(params) * 2
def as_postgresql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
if len(key_transforms) > 1:
sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator)
return sql, tuple(params) + (key_transforms,)
try:
lookup = int(self.key_name)
except ValueError:
lookup = self.key_name
return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,)
def as_sqlite(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
datatype_values = ",".join(
[repr(datatype) for datatype in connection.ops.jsonfield_datatype_values]
)
return (
"(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) "
"THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)"
) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3
class KeyTextTransform(KeyTransform):
postgres_operator = "->>"
postgres_nested_operator = "#>>"
output_field = TextField()
def as_mysql(self, compiler, connection):
if connection.mysql_is_mariadb:
# MariaDB doesn't support -> and ->> operators (see MDEV-13594).
sql, params = super().as_mysql(compiler, connection)
return "JSON_UNQUOTE(%s)" % sql, params
else:
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return "(%s ->> %%s)" % lhs, tuple(params) + (json_path,)
class KeyTransformTextLookupMixin:
"""
Mixin for combining with a lookup expecting a text lhs from a JSONField
key lookup. On PostgreSQL, make use of the ->> operator instead of casting
key values to text and performing the lookup on the resulting
representation.
"""
def __init__(self, key_transform, *args, **kwargs):
if not isinstance(key_transform, KeyTransform):
raise TypeError(
"Transform should be an instance of KeyTransform in order to "
"use this lookup."
)
key_text_transform = KeyTextTransform(
key_transform.key_name,
*key_transform.source_expressions,
**key_transform.extra,
)
super().__init__(key_text_transform, *args, **kwargs)
class KeyTransformIsNull(lookups.IsNull):
# key__isnull=False is the same as has_key='key'
def as_oracle(self, compiler, connection):
sql, params = HasKeyOrArrayIndex(
self.lhs.lhs,
self.lhs.key_name,
).as_oracle(compiler, connection)
if not self.rhs:
return sql, params
# Column doesn't have a key or IS NULL.
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection)
return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params)
def as_sqlite(self, compiler, connection):
template = "JSON_TYPE(%s, %%s) IS NULL"
if not self.rhs:
template = "JSON_TYPE(%s, %%s) IS NOT NULL"
return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql(
compiler,
connection,
template=template,
)
class KeyTransformIn(lookups.In):
def resolve_expression_parameter(self, compiler, connection, sql, param):
sql, params = super().resolve_expression_parameter(
compiler,
connection,
sql,
param,
)
if (
not hasattr(param, "as_sql")
and not connection.features.has_native_json_field
):
if connection.vendor == "oracle":
value = json.loads(param)
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
if isinstance(value, (list, dict)):
sql = sql % "JSON_QUERY"
else:
sql = sql % "JSON_VALUE"
elif connection.vendor == "mysql" or (
connection.vendor == "sqlite"
and params[0] not in connection.ops.jsonfield_datatype_values
):
sql = "JSON_EXTRACT(%s, '$')"
if connection.vendor == "mysql" and connection.mysql_is_mariadb:
sql = "JSON_UNQUOTE(%s)" % sql
return sql, params
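# The parameter rewriting above makes the right-hand side of an __in lookup
# comparable with a key transform's output on backends without a native JSON
# field: JSON_VALUE/JSON_QUERY wrapping on Oracle, JSON_EXTRACT on MySQL and
# SQLite, and an extra JSON_UNQUOTE on MariaDB.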
class KeyTransformExact(JSONExact):
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
return super(lookups.Exact, self).process_rhs(compiler, connection)
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "oracle":
func = []
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
for value in rhs_params:
value = json.loads(value)
if isinstance(value, (list, dict)):
func.append(sql % "JSON_QUERY")
else:
func.append(sql % "JSON_VALUE")
rhs = rhs % tuple(func)
elif connection.vendor == "sqlite":
func = []
for value in rhs_params:
if value in connection.ops.jsonfield_datatype_values:
func.append("%s")
else:
func.append("JSON_EXTRACT(%s, '$')")
rhs = rhs % tuple(func)
return rhs, rhs_params
def as_oracle(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if rhs_params == ["null"]:
# Field has key and it's NULL.
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name)
has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection)
is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True)
is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection)
return (
"%s AND %s" % (has_key_sql, is_null_sql),
tuple(has_key_params) + tuple(is_null_params),
)
return super().as_sql(compiler, connection)
class KeyTransformIExact(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact
):
pass
class KeyTransformIContains(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains
):
pass
class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith):
pass
class KeyTransformIStartsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith
):
pass
class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith):
pass
class KeyTransformIEndsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith
):
pass
class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex):
pass
class KeyTransformIRegex(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex
):
pass
class KeyTransformNumericLookupMixin:
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if not connection.features.has_native_json_field:
rhs_params = [json.loads(value) for value in rhs_params]
return rhs, rhs_params
class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan):
pass
class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual):
pass
class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan):
pass
class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual):
pass
KeyTransform.register_lookup(KeyTransformIn)
KeyTransform.register_lookup(KeyTransformExact)
KeyTransform.register_lookup(KeyTransformIExact)
KeyTransform.register_lookup(KeyTransformIsNull)
KeyTransform.register_lookup(KeyTransformIContains)
KeyTransform.register_lookup(KeyTransformStartsWith)
KeyTransform.register_lookup(KeyTransformIStartsWith)
KeyTransform.register_lookup(KeyTransformEndsWith)
KeyTransform.register_lookup(KeyTransformIEndsWith)
KeyTransform.register_lookup(KeyTransformRegex)
KeyTransform.register_lookup(KeyTransformIRegex)
KeyTransform.register_lookup(KeyTransformLt)
KeyTransform.register_lookup(KeyTransformLte)
KeyTransform.register_lookup(KeyTransformGt)
KeyTransform.register_lookup(KeyTransformGte)
class KeyTransformFactory:
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
|
e8d8b8db0f915996bb28280cc49d35fc623e559422b2c4cb83b597fb434b045e | """
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import Q, Window, signals
from django.db.models.functions import RowNumber
from django.db.models.lookups import GreaterThan, LessThanOrEqual
from django.db.models.query import QuerySet
from django.db.models.query_utils import DeferredAttribute
from django.db.models.utils import resolve_callables
from django.utils.functional import cached_property
class ForeignKeyDeferredAttribute(DeferredAttribute):
def __set__(self, instance, value):
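        # Assigning a new raw foreign key value invalidates any cached related
        # instance so that the next access refetches the correct object.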
if instance.__dict__.get(self.field.attname) != value and self.field.is_cached(
instance
):
self.field.delete_cached_value(instance)
instance.__dict__[self.field.attname] = value
def _filter_prefetch_queryset(queryset, field_name, instances):
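    """
    Filter the prefetch queryset down to the given instances. A sliced
    queryset can't keep its LIMIT/OFFSET, since the slice must apply per
    parent row, so it's rewritten as a ROW_NUMBER() window partitioned by
    the relation: qs[2:5] becomes "row_number > 2 AND row_number <= 5" and
    the limits are cleared from the query.
    """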
predicate = Q(**{f"{field_name}__in": instances})
if queryset.query.is_sliced:
low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark
order_by = [
expr
for expr, _ in queryset.query.get_compiler(
using=queryset._db or DEFAULT_DB_ALIAS
).get_order_by()
]
window = Window(RowNumber(), partition_by=field_name, order_by=order_by)
predicate &= GreaterThan(window, low_mark)
if high_mark is not None:
predicate &= LessThanOrEqual(window, high_mark)
queryset.query.clear_limits()
return queryset.filter(predicate)
class ForwardManyToOneDescriptor:
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `self.field.model` might
# still be a string model reference.
return type(
"RelatedObjectDoesNotExist",
(self.field.remote_field.model.DoesNotExist, AttributeError),
{
"__module__": self.field.model.__module__,
"__qualname__": "%s.%s.RelatedObjectDoesNotExist"
% (
self.field.model.__qualname__,
self.field.name,
),
},
)
def is_cached(self, instance):
return self.field.is_cached(instance)
def get_queryset(self, **hints):
return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
remote_field = self.field.remote_field
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {
"%s__in"
% related_field.name: {instance_attr(inst)[0] for inst in instances}
}
else:
query = {"%s__in" % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not remote_field.multiple:
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
remote_field.set_cached_value(rel_obj, instance)
return (
queryset,
rel_obj_attr,
instance_attr,
True,
self.field.get_cache_name(),
False,
)
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = None not in self.field.get_local_related_value(instance)
ancestor_link = (
instance._meta.get_ancestor_link(self.field.model)
if has_value
else None
)
if ancestor_link and ancestor_link.is_cached(instance):
# An ancestor link will exist if this field is defined on a
# multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance)
# The value might be cached on an ancestor if the instance
# originated from walking down the inheritance chain.
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if rel_obj is None and has_value:
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not remote_field.multiple:
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(
value, self.field.remote_field.model._meta.concrete_model
):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.'
% (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(
instance.__class__, instance=value
)
if value._state.db is None:
value._state.db = router.db_for_write(
value.__class__, instance=instance
)
if not router.allow_relation(value, instance):
raise ValueError(
'Cannot assign "%r": the current database router prevents this '
"relation." % value
)
remote_field = self.field.remote_field
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = self.field.get_cached_value(instance, default=None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
remote_field.set_cached_value(related, None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.field.set_cached_value(instance, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not remote_field.multiple:
remote_field.set_cached_value(value, instance)
def __reduce__(self):
"""
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
"""
return getattr, (self.field.model, self.field.name)
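# Illustrative usage sketch (not part of Django; model names follow the
# hypothetical Child/Parent example from the docstrings above):
#
#   child = Child.objects.get(pk=1)
#   child.parent          # first access: one SQL query, result cached on child
#   child.parent          # second access: served from the field cache
#   child.parent = other  # __set__ copies other's pk onto child's FK column(s)
#                         # and primes the forward cache (and, for one-to-one
#                         # relations, the reverse cache)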
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
            # If any of the related model's fields are deferred, fall back to
            # fetching all fields from the related model. This avoids a query
            # on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
obj = rel_model(**kwargs)
obj._state.adding = instance._state.adding
obj._state.db = instance._state.db
return obj
return super().get_object(instance)
def __set__(self, instance, value):
super().__set__(instance, value)
# If the primary key is a link to a parent model and a parent instance
# is being set, update the value of the inherited pk(s).
if self.field.primary_key and self.field.remote_field.parent_link:
opts = instance._meta
# Inherited primary key fields from this object's base classes.
inherited_pk_fields = [
field
for field in opts.concrete_fields
if field.primary_key and field.remote_field
]
for field in inherited_pk_fields:
rel_model_pk_name = field.remote_field.model._meta.pk.attname
raw_value = (
getattr(value, rel_model_pk_name) if value is not None else None
)
setattr(instance, rel_model_pk_name, raw_value)
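# Illustrative note (assumed models): with multi-table inheritance, the
# implicit parent link is a ForwardOneToOneDescriptor with parent_link=True,
# so accessing it can avoid a query entirely:
#
#   class Place(models.Model): ...
#   class Restaurant(Place): ...  # implicit place_ptr = OneToOneField(Place)
#
#   restaurant.place_ptr  # built from the child's own loaded fields by
#                         # get_object() above, unless fields are deferred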
class ReverseOneToOneDescriptor:
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
# Following the example above, `related` is an instance of OneToOneRel
# which represents the reverse restaurant field (place.restaurant).
self.related = related
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
"RelatedObjectDoesNotExist",
(self.related.related_model.DoesNotExist, AttributeError),
{
"__module__": self.related.model.__module__,
"__qualname__": "%s.%s.RelatedObjectDoesNotExist"
% (
self.related.model.__qualname__,
self.related.name,
),
},
)
def is_cached(self, instance):
return self.related.is_cached(instance)
def get_queryset(self, **hints):
return self.related.related_model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.related.field.get_local_related_value
instance_attr = self.related.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {"%s__in" % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
self.related.field.set_cached_value(rel_obj, instance)
return (
queryset,
rel_obj_attr,
instance_attr,
True,
self.related.get_cache_name(),
False,
)
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s."
% (instance.__class__.__name__, self.related.get_accessor_name())
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
# Following the example above, this would be the cached
# ``restaurant`` instance (if any).
rel_obj = self.related.get_cached_value(instance, default=None)
if rel_obj is not None:
# Remove the ``restaurant`` instance from the ``place``
# instance cache.
self.related.delete_cached_value(instance)
# Set the ``place`` field on the ``restaurant``
# instance to None.
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.'
% (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(
instance.__class__, instance=value
)
if value._state.db is None:
value._state.db = router.db_for_write(
value.__class__, instance=instance
)
if not router.allow_relation(value, instance):
raise ValueError(
'Cannot assign "%r": the current database router prevents this '
"relation." % value
)
related_pk = tuple(
getattr(instance, field.attname)
for field in self.related.field.foreign_related_fields
)
# Set the value of the related field to the value of the related
# object's related field.
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.related.set_cached_value(instance, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
self.related.field.set_cached_value(value, instance)
def __reduce__(self):
# Same purpose as ForwardManyToOneDescriptor.__reduce__().
return getattr, (self.related.model, self.related.name)
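# Illustrative sketch of the reverse accessor (hypothetical Place/Restaurant
# models from the docstring above):
#
#   place = Place.objects.get(pk=1)
#   try:
#       place.restaurant  # one query; cached on both sides afterwards
#   except Place.restaurant.RelatedObjectDoesNotExist:
#       pass  # subclasses both Restaurant.DoesNotExist and AttributeError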
class ReverseManyToOneDescriptor:
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cache_key(self):
# Being able to access the manager instance precludes it from being
# hidden. The rel's accessor name is used to allow multiple managers
# to the same model to coexist. e.g. post.attached_comment_set and
# post.attached_link_set are separately cached.
return self.rel.get_cache_name()
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if key not in instance_cache:
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key]
def _get_set_deprecation_msg_params(self):
return (
"reverse side of a related set",
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
raise TypeError(
"Direct assignment to the %s is prohibited. Use %s.set() instead."
% self._get_set_deprecation_msg_params(),
)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
            # Even if this relation is not to pk, we still require a pk value.
            # The expectation is that the instance has already been saved to
            # the DB, although having a pk value isn't a guarantee of that.
if self.instance.pk is None:
raise ValueError(
f"{instance.__class__.__name__!r} instance needs to have a primary "
f"key value before this relationship can be used."
)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _check_fk_val(self):
for field in self.field.foreign_related_fields:
if getattr(self.instance, field.attname) is None:
raise ValueError(
f'"{self.instance!r}" needs to have a value for field '
f'"{field.attname}" before this relationship can be used.'
)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[
db
].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == "" and empty_strings_as_null):
return queryset.none()
if self.field.many_to_one:
# Guard against field-like objects such as GenericRelation
# that abuse create_reverse_many_to_one_manager() with reverse
# one-to-many relationships instead and break known related
# objects assignment.
try:
target_field = self.field.target_field
except FieldError:
# The relationship has multiple target fields. Use a tuple
# for related object id.
rel_obj_id = tuple(
[
getattr(self.instance, target_field.attname)
for target_field in self.field.path_infos[-1].target_fields
]
)
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {
self.field: {rel_obj_id: self.instance}
}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(
self.field.remote_field.get_cache_name()
)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[
self.field.remote_field.get_cache_name()
]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
queryset = _filter_prefetch_queryset(queryset, self.field.name, instances)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
if not self.field.is_cached(rel_obj):
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name, False
def add(self, *objs, bulk=True):
self._check_fk_val()
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (
self.model._meta.object_name,
obj,
)
)
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.field.name: self.instance,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a
# value of null.
if rel.field.null:
def remove(self, *objs, bulk=True):
if not objs:
return
self._check_fk_val()
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (
self.model._meta.object_name,
obj,
)
)
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._check_fk_val()
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
self._check_fk_val()
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
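# Illustrative usage of the RelatedManager created above (hypothetical
# Parent/Child models from the ReverseManyToOneDescriptor docstring):
#
#   parent.children.add(child)        # bulk UPDATE of the child's FK column
#   parent.children.create(name="x")  # INSERT with the FK preset to parent
#   parent.children.set([a, b])       # diffs old/new; removal needs a nullable FK
#   parent.children.clear()           # only defined when the FK is nullable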
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super().__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
@cached_property
def related_manager_cache_key(self):
if self.reverse:
# Symmetrical M2Ms won't have an accessor name, but should never
# end up in the reverse branch anyway, as the related_name ends up
# being hidden, and no public manager is created.
return self.rel.get_cache_name()
else:
# For forward managers, defer to the field name.
return self.field.get_cache_name()
def _get_set_deprecation_msg_params(self):
return (
"%s side of a many-to-many set"
% ("reverse" if self.reverse else "forward"),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError(
'"%r" needs to have a value for field "%s" before '
"this many-to-many relationship can be used."
% (instance, self.pk_field_names[self.source_field_name])
)
            # Even if this relation is not to pk, we still require a pk value.
            # The expectation is that the instance has already been saved to
            # the DB, although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError(
"%r instance needs to have a primary key value before "
"a many-to-many relationship can be used."
% instance.__class__.__name__
)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(
manager.__class__, rel, reverse
)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q.create([(self.source_field_name, self.related_val)])
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (
not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()
)
if removed_vals_filters:
filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)])
if self.symmetrical:
symmetrical_filters = Q.create(
[(self.target_field_name, self.related_val)]
)
if removed_vals_filters:
symmetrical_filters &= Q.create(
[(f"{self.source_field_name}__in", removed_vals)]
)
filters |= symmetrical_filters
return filters
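        # Illustrative shape of the filter built above for a symmetrical
        # self-referential m2m (hypothetical through-field names):
        #
        #   (Q(from_person=self.related_val) & Q(to_person__in=removed_vals))
        #   | (Q(to_person=self.related_val) & Q(from_person__in=removed_vals))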
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
queryset = _filter_prefetch_queryset(
queryset._next_is_sticky(), self.query_field_name, instances
)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(
select={
"_prefetch_related_val_%s"
% f.attname: "%s.%s"
% (qn(join_table), qn(f.column))
for f in fk.local_related_fields
}
)
return (
queryset,
lambda result: tuple(
getattr(result, "_prefetch_related_val_%s" % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name,
self.target_field_name,
*objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table.
if self.symmetrical:
self._add_items(
self.target_field_name,
self.source_field_name,
*objs,
through_defaults=through_defaults,
)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through,
action="pre_clear",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=None,
using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through,
action="post_clear",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=None,
using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(
self.using(db).values_list(
self.target_field.target_field.attname, flat=True
)
)
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model)
else self.target_field.get_prep_value(obj)
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(
**kwargs
)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(
ManyRelatedManager, self.db_manager(db)
).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
"""
Return the set of ids of `objs` that the target field references.
"""
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", '
'value is on database "%s"'
% (obj, self.instance._state.db, obj._state.db)
)
target_id = target_field.get_foreign_related_value(obj)[0]
if target_id is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None'
% (obj, target_field_name)
)
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(
self, source_field_name, target_field_name, db, target_ids
):
"""
Return the subset of ids of `objs` that aren't already assigned to
this relationship.
"""
vals = (
self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(
**{
source_field_name: self.related_val[0],
"%s__in" % target_field_name: target_ids,
}
)
)
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
"""
            Return a boolean triple describing how the add should be performed.
            The first element is whether or not bulk_create(ignore_conflicts)
            can be used, the second whether or not signals must be sent, and
            the third element is whether or not the immediate bulk insertion
            with conflicts ignored can be performed.
"""
# Conflicts can be ignored when the intermediary model is
# auto-created as the only possible collision is on the
# (source_id, target_id) tuple. The same assertion doesn't hold for
# user-defined intermediary models as they could have other fields
# causing conflicts which must be surfaced.
can_ignore_conflicts = (
self.through._meta.auto_created is not False
and connections[db].features.supports_ignore_conflicts
)
            # Don't send the signal when inserting duplicate data rows
            # for symmetrical reverse entries.
must_send_signals = (
self.reverse or source_field_name == self.source_field_name
) and (signals.m2m_changed.has_listeners(self.through))
# Fast addition through bulk insertion can only be performed
# if no m2m_changed listeners are connected for self.through
# as they require the added set of ids to be provided via
# pk_set.
return (
can_ignore_conflicts,
must_send_signals,
(can_ignore_conflicts and not must_send_signals),
)
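        # Illustrative example of the plan triple (assumed scenario): with an
        # auto-created through model, a backend that supports ignore_conflicts,
        # and no m2m_changed listeners, this returns (True, False, True) and
        # _add_items() below takes the single bulk_create() fast path.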
def _add_items(
self, source_field_name, target_field_name, *objs, through_defaults=None
):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys
# of object instances.
if not objs:
return
through_defaults = dict(resolve_callables(through_defaults or {}))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan(
db, source_field_name
)
if can_fast_add:
self.through._default_manager.using(db).bulk_create(
[
self.through(
**{
"%s_id" % source_field_name: self.related_val[0],
"%s_id" % target_field_name: target_id,
}
)
for target_id in target_ids
],
ignore_conflicts=True,
)
return
missing_target_ids = self._get_missing_target_ids(
source_field_name, target_field_name, db, target_ids
)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(
sender=self.through,
action="pre_add",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=missing_target_ids,
using=db,
)
# Add the ones that aren't there already.
self.through._default_manager.using(db).bulk_create(
[
self.through(
**through_defaults,
**{
"%s_id" % source_field_name: self.related_val[0],
"%s_id" % target_field_name: target_id,
},
)
for target_id in missing_target_ids
],
ignore_conflicts=can_ignore_conflicts,
)
if must_send_signals:
signals.m2m_changed.send(
sender=self.through,
action="post_add",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=missing_target_ids,
using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove. Either object instances, or primary
# keys of object instances.
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through,
action="pre_remove",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=old_ids,
using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(
**{"%s__in" % self.target_field.target_field.attname: old_ids}
)
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through,
action="post_remove",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=old_ids,
using=db,
)
return ManyRelatedManager
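# Illustrative usage of the ManyRelatedManager defined above (hypothetical
# Pizza/Topping models from the ManyToManyDescriptor docstring):
#
#   pizza.toppings.add(t1, t2)    # inserts only the missing through rows
#   pizza.toppings.remove(t1)     # deletes the matching through rows
#   pizza.toppings.set([t2, t3])  # computes a diff, then remove()/add()
#   topping.pizzas.all()          # the reverse side uses the same manager class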
|
5a6a9f90bd4aa97183179458c94f12f4af6448a353170b7070cac9ff018d9b97 | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
from django.db.backends.utils import strip_quotes
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# Setting all constraints to IMMEDIATE to allow changing data in the same
# transaction.
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
"; SET CONSTRAINTS ALL IMMEDIATE"
)
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_index_concurrently = (
"CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s"
# Setting the constraint to IMMEDIATE to allow changing data in the same
# transaction.
sql_create_column_inline_fk = (
"CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s"
"; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE"
)
# Setting the constraint to IMMEDIATE runs any deferred checks to allow
# dropping it in the same transaction.
sql_delete_fk = (
"SET CONSTRAINTS %(name)s IMMEDIATE; "
"ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
)
sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)"
sql_add_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD "
"GENERATED BY DEFAULT AS IDENTITY"
)
    sql_drop_identity = (
        "ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS"
    )
def quote_value(self, value):
if isinstance(value, str):
value = value.replace("%", "%%")
adapted = psycopg2.extensions.adapt(value)
if hasattr(adapted, "encoding"):
adapted.encoding = "utf8"
# getquoted() returns a quoted bytestring of the adapted value.
return adapted.getquoted().decode()
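    # Illustrative quote_value() results (values adapted via psycopg2; exact
    # output is driver-dependent):
    #
    #   quote_value("it's")  -> "'it''s'"
    #   quote_value("100%")  -> "'100%%'"  (percent doubled for format strings)
    #   quote_value(True)    -> "true"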
def _field_indexes_sql(self, model, field):
output = super()._field_indexes_sql(model, field)
like_index_statement = self._create_like_index_sql(model, field)
if like_index_statement is not None:
output.append(like_index_statement)
return output
def _field_data_type(self, field):
if field.is_relation:
return field.rel_db_type(self.connection)
return self.connection.data_types.get(
field.get_internal_type(),
field.db_type(self.connection),
)
def _field_base_data_types(self, field):
# Yield base data types for array fields.
if field.base_field.get_internal_type() == "ArrayField":
yield from self._field_base_data_types(field.base_field)
else:
yield self._field_data_type(field.base_field)
def _create_like_index_sql(self, model, field):
"""
Return the statement to create an index with varchar operator pattern
when the column type is 'varchar' or 'text', otherwise return None.
"""
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if "[" in db_type:
return None
            # Non-deterministic collations on PostgreSQL don't support indexes
            # for operator classes varchar_pattern_ops/text_pattern_ops.
if getattr(field, "db_collation", None):
return None
if db_type.startswith("varchar"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["varchar_pattern_ops"],
)
elif db_type.startswith("text"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["text_pattern_ops"],
)
return None
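    # Illustrative example (assumed table/column names): for a CharField with
    # db_index=True on table "app_author", the method above emits a second
    # index roughly like:
    #
    #   CREATE INDEX "app_author_name_<hash>_like" ON "app_author"
    #   ("name" varchar_pattern_ops)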
def _using_sql(self, new_field, old_field):
using_sql = " USING %(column)s::%(type)s"
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
if new_internal_type == "ArrayField" and new_internal_type == old_internal_type:
# Compare base data types for array fields.
if list(self._field_base_data_types(old_field)) != list(
self._field_base_data_types(new_field)
):
return using_sql
elif self._field_data_type(old_field) != self._field_data_type(new_field):
return using_sql
return ""
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
self.sql_alter_column_type += using_sql
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
# Make ALTER TYPE with IDENTITY make sense.
table = strip_quotes(model._meta.db_table)
auto_field_types = {
"AutoField",
"BigAutoField",
"SmallAutoField",
}
old_is_auto = old_internal_type in auto_field_types
new_is_auto = new_internal_type in auto_field_types
if new_is_auto and not old_is_auto:
column = strip_quotes(new_field.column)
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(column),
"type": new_type,
},
[],
),
[
(
self.sql_add_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(column),
},
[],
),
],
)
elif old_is_auto and not new_is_auto:
# Drop IDENTITY if exists (pre-Django 4.1 serial columns don't have
# it).
self.execute(
                self.sql_drop_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(strip_quotes(new_field.column)),
}
)
column = strip_quotes(new_field.column)
sequence_name = "%s_%s_seq" % (table, column)
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
return fragment, [
(
# Drop the sequence if exists (Django 4.1+ identity columns
# don't have it).
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
),
]
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
def _alter_column_collation_sql(
self, model, new_field, new_type, new_collation, old_field
):
sql = self.sql_alter_column_collate
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
sql += using_sql
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": " " + self._collate_sql(new_collation)
if new_collation
else "",
},
[],
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
super()._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
# Added an index? Create any PostgreSQL-specific indexes.
if (not (old_field.db_index or old_field.unique) and new_field.db_index) or (
not old_field.unique and new_field.unique
):
like_index_statement = self._create_like_index_sql(model, new_field)
if like_index_statement is not None:
self.execute(like_index_statement)
# Removed an index? Drop any PostgreSQL-specific indexes.
if old_field.unique and not (new_field.db_index or new_field.unique):
index_to_remove = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_to_remove))
def _index_columns(self, table, columns, col_suffixes, opclasses):
if opclasses:
return IndexColumns(
table,
columns,
self.quote_name,
col_suffixes=col_suffixes,
opclasses=opclasses,
)
return super()._index_columns(table, columns, col_suffixes, opclasses)
def add_index(self, model, index, concurrently=False):
self.execute(
index.create_sql(model, self, concurrently=concurrently), params=None
)
def remove_index(self, model, index, concurrently=False):
self.execute(index.remove_sql(model, self, concurrently=concurrently))
def _delete_index_sql(self, model, name, sql=None, concurrently=False):
sql = (
self.sql_delete_index_concurrently
if concurrently
else self.sql_delete_index
)
return super()._delete_index_sql(model, name, sql)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
concurrently=False,
include=None,
expressions=None,
):
sql = (
self.sql_create_index
if not concurrently
else self.sql_create_index_concurrently
)
return super()._create_index_sql(
model,
fields=fields,
name=name,
suffix=suffix,
using=using,
db_tablespace=db_tablespace,
col_suffixes=col_suffixes,
sql=sql,
opclasses=opclasses,
condition=condition,
include=include,
expressions=expressions,
)
|
757ec206912d1ee7596c3fdf57a2ac751f691422b4090c5b0e5d39db8be84448 | import datetime
import os
import re
import unittest
from unittest import mock
from urllib.parse import parse_qsl, urljoin, urlparse
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib import admin
from django.contrib.admin import AdminSite, ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import ADDITION, DELETION, LogEntry
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME, get_permission_codename
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.db import connection
from django.forms.utils import ErrorList
from django.template.response import TemplateResponse
from django.test import (
TestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import override_script_prefix
from django.urls import NoReverseMatch, resolve, reverse
from django.utils import formats, translation
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from . import customadmin
from .admin import CityAdmin, site, site2
from .models import (
Actor,
AdminOrderedAdminMethod,
AdminOrderedCallable,
AdminOrderedField,
AdminOrderedModelMethod,
Album,
Answer,
Answer2,
Article,
BarAccount,
Book,
Bookmark,
Box,
Category,
Chapter,
ChapterXtra1,
ChapterXtra2,
Character,
Child,
Choice,
City,
Collector,
Color,
ComplexSortedPerson,
CoverLetter,
CustomArticle,
CyclicOne,
CyclicTwo,
DooHickey,
Employee,
EmptyModel,
Fabric,
FancyDoodad,
FieldOverridePost,
FilteredManager,
FooAccount,
FoodDelivery,
FunkyTag,
Gallery,
Grommet,
Inquisition,
Language,
Link,
MainPrepopulated,
Media,
ModelWithStringPrimaryKey,
OtherStory,
Paper,
Parent,
ParentWithDependentChildren,
ParentWithUUIDPK,
Person,
Persona,
Picture,
Pizza,
Plot,
PlotDetails,
PluggableSearchPerson,
Podcast,
Post,
PrePopulatedPost,
Promo,
Question,
ReadablePizza,
ReadOnlyPizza,
ReadOnlyRelatedField,
Recommendation,
Recommender,
RelatedPrepopulated,
RelatedWithUUIDPKModel,
Report,
Restaurant,
RowLevelChangePermissionModel,
SecretHideout,
Section,
ShortMessage,
Simple,
Song,
State,
Story,
SuperSecretHideout,
SuperVillain,
Telegram,
TitleTranslation,
Topping,
Traveler,
UnchangeableObject,
UndeletableObject,
UnorderedObject,
UserProxy,
Villain,
Vodcast,
Whatsit,
Widget,
Worker,
WorkHour,
)
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
MULTIPART_ENCTYPE = 'enctype="multipart/form-data"'
def make_aware_datetimes(dt, iana_key):
"""Makes one aware datetime for each supported time zone provider."""
yield dt.replace(tzinfo=zoneinfo.ZoneInfo(iana_key))
if pytz is not None:
yield pytz.timezone(iana_key).localize(dt, is_dst=None)
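# A minimal usage sketch (assumed values) for the helper above:
#
#   dt = datetime.datetime(2008, 3, 18, 11, 54, 58)
#   for aware in make_aware_datetimes(dt, "America/New_York"):
#       ...  # one zoneinfo-aware datetime, plus a pytz one when installed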
class AdminFieldExtractionMixin:
"""
Helper methods for extracting data from AdminForm.
"""
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
fields = []
for fieldset in response.context["adminform"]:
for field_line in fieldset:
fields.extend(field_line)
return fields
def get_admin_readonly_fields(self, response):
"""
Return the readonly fields for the response's AdminForm.
"""
return [f for f in self.get_admin_form_fields(response) if f.is_readonly]
def get_admin_readonly_field(self, response, field_name):
"""
Return the readonly field for the given field_name.
"""
admin_readonly_fields = self.get_admin_readonly_fields(response)
for field in admin_readonly_fields:
if field.field["name"] == field_name:
return field
@override_settings(ROOT_URLCONF="admin_views.urls", USE_I18N=True, LANGUAGE_CODE="en")
class AdminViewBasicTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 1",
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 2",
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.color1 = Color.objects.create(value="Red", warm=True)
cls.color2 = Color.objects.create(value="Orange", warm=True)
cls.color3 = Color.objects.create(value="Blue", warm=False)
cls.color4 = Color.objects.create(value="Green", warm=False)
cls.fab1 = Fabric.objects.create(surface="x")
cls.fab2 = Fabric.objects.create(surface="y")
cls.fab3 = Fabric.objects.create(surface="plain")
cls.b1 = Book.objects.create(name="Book 1")
cls.b2 = Book.objects.create(name="Book 2")
cls.pro1 = Promo.objects.create(name="Promo 1", book=cls.b1)
cls.pro1 = Promo.objects.create(name="Promo 2", book=cls.b2)
cls.chap1 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b1
)
cls.chap3 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b2
)
cls.chap4 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b2
)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra="ChapterXtra1 1")
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra="ChapterXtra1 2")
Actor.objects.create(name="Palin", age=27)
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
            # There is no title in the database; give one here or the formset
            # will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
def setUp(self):
self.client.force_login(self.superuser)
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(
response.content.index(text1.encode()),
response.content.index(text2.encode()),
(failing_msg or "")
+ "\nResponse:\n"
+ response.content.decode(response.charset),
)
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
        If you leave off the trailing slash, the app should redirect and add it.
"""
add_url = reverse("admin:admin_views_article_add")
response = self.client.get(add_url[:-1])
self.assertRedirects(response, add_url, status_code=301)
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get(
reverse("admin:admin_views_section_add"), {"name": "My Section"}
)
self.assertContains(
response,
'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response",
)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
GET on the change_view (when passing a string as the PK argument for a
model with an integer PK field) redirects to the index page with a
message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(quote("abc/<b>"),)),
follow=True,
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["section with ID “abc/<b>” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_edit_GET_old_url_redirect(self):
"""
The change URL changed in Django 1.9, but the old one still redirects.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)).replace(
"change/", ""
)
)
self.assertRedirects(
response, reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
def test_basic_inheritance_GET_string_PK(self):
"""
GET on the change_view (for inherited models) redirects to the index
page with a message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_supervillain_change", args=("abc",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["super villain with ID “abc” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_section_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""HTTP response from a popup is properly escaped."""
post_data = {
IS_POPUP_VAR: "1",
"title": "title with a new\nline",
"content": "some content",
"date_0": "2010-09-10",
"date_1": "14:55:39",
}
response = self.client.post(reverse("admin:admin_views_article_add"), post_data)
self.assertContains(response, "title with a new\\nline")
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
url = reverse("admin:admin_views_section_change", args=(self.s1.pk,))
response = self.client.post(url, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as_delete_inline(self):
"""
Should be able to "Save as new" while also deleting an inline.
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-2-DELETE": "1",
"article_set-3-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302)
# started with 3 articles, one was deleted.
self.assertEqual(Section.objects.latest("id").article_set.count(), 2)
def test_change_list_column_field_classes(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
# callables display the callable name.
self.assertContains(response, "column-callable_year")
self.assertContains(response, "field-callable_year")
# lambdas display as "lambda" + index that they appear in list_display.
self.assertContains(response, "column-lambda8")
self.assertContains(response, "field-lambda8")
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_property(self):
"""
Sort on a list_display field that is a property (column 10 is
a property in Article model).
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 10}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on property are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on property are out of order.",
)
def test_change_list_sorting_callable_query_expression(self):
"""Query expressions may be used for admin_order_field."""
tests = [
("order_by_expression", 9),
("order_by_f_expression", 12),
("order_by_orderby_expression", 13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_callable_query_expression_reverse(self):
tests = [
("order_by_expression", -9),
("order_by_f_expression", -12),
("order_by_orderby_expression", -13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-3"}
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on Model method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on Model method are out of order.",
)
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "4"}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
td = '<td class="field-model_property_year">%s</td>'
td_2000, td_2008, td_2009 = td % 2000, td % 2008, td % 2009
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "6"}
)
self.assertContentBefore(
response,
td_2009,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2000,
"Results of sorting on ModelAdmin method are out of order.",
)
# Let's make sure the ordering is right and that we don't get a
# FieldError when we change to descending order
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-6"}
)
self.assertContentBefore(
response,
td_2000,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2009,
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
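        # "o" is a dot-separated list of list_display column indices (0 is
        # the action checkbox); a "-" prefix sorts that column descending.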
# Sort by name, gender
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "1.2"}
)
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "-2.1"}
)
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
        changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
response = self.client.get(reverse("admin:admin_views_person_changelist"), {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso="ur", name="Urdu")
l2 = Language.objects.create(iso="ar", name="Arabic")
link1 = reverse("admin:admin_views_language_change", args=(quote(l1.pk),))
link2 = reverse("admin:admin_views_language_change", args=(quote(l2.pk),))
response = self.client.get(reverse("admin:admin_views_language_changelist"), {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get(
reverse("admin:admin_views_language_changelist"), {"o": "-1"}
)
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_podcast_change", args=(p2.pk,))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# The changelist displays the correct columns if two columns correspond
# to the same ordering field.
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(quote(p1.pk),))
link2 = reverse("admin:admin_views_podcast_change", args=(quote(p2.pk),))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse("admin:admin_views_complexsortedperson_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_complexsortedperson_change", args=(p2.pk,))
response = self.client.get(
reverse("admin:admin_views_complexsortedperson_changelist"), {}
)
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, "Name")
self.assertContains(response, "Colored name")
# Check order
self.assertContentBefore(response, "Name", "Colored name")
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
The admin shows default sort indicators for all kinds of 'ordering'
fields: field names, method on the model admin and model itself, and
other callables. See #17252.
"""
models = [
(AdminOrderedField, "adminorderedfield"),
(AdminOrderedModelMethod, "adminorderedmodelmethod"),
(AdminOrderedAdminMethod, "adminorderedadminmethod"),
(AdminOrderedCallable, "adminorderedcallable"),
]
for model, url in models:
model.objects.create(stuff="The Last Item", order=3)
model.objects.create(stuff="The First Item", order=1)
model.objects.create(stuff="The Middle Item", order=2)
response = self.client.get(
reverse("admin:admin_views_%s_changelist" % url), {}
)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(
response.context["cl"].get_ordering_field_columns(), {2: "asc"}
)
# Check order of records.
self.assertContentBefore(response, "The First Item", "The Middle Item")
self.assertContentBefore(response, "The Middle Item", "The Last Item")
def test_has_related_field_in_list_display_fk(self):
"""Joins shouldn't be performed for <FK>_id fields in list display."""
state = State.objects.create(name="Karnataka")
City.objects.create(state=state, name="Bangalore")
response = self.client.get(reverse("admin:admin_views_city_changelist"), {})
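        # Listing "state" renders the related object and requires a join;
        # "state_id" only reads the local foreign key column, so no related
        # field is involved.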
response.context["cl"].list_display = ["id", "name", "state"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["id", "name", "state_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_has_related_field_in_list_display_o2o(self):
"""Joins shouldn't be performed for <O2O>_id fields in list display."""
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["media_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_limited_filter(self):
"""
Admin changelist filters do not contain objects excluded via
limit_choices_to.
"""
response = self.client.get(reverse("admin:admin_views_thing_changelist"))
self.assertContains(
response,
'<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view",
)
self.assertNotContains(
response,
'<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to",
)
def test_relation_spanning_filters(self):
changelist_url = reverse("admin:admin_views_chapterxtra1_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, '<div id="changelist-filter">')
filters = {
"chap__id__exact": {
"values": [c.id for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.id == value,
},
"chap__title": {
"values": [c.title for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.title == value,
},
"chap__book__id__exact": {
"values": [b.id for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.id == value,
},
"chap__book__name": {
"values": [b.name for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.name == value,
},
"chap__book__promo__id__exact": {
"values": [p.id for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
id=value
).exists(),
},
"chap__book__promo__name": {
"values": [p.name for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
name=value
).exists(),
},
# A forward relation (book) after a reverse relation (promo).
"guest_author__promo__book__id__exact": {
"values": [p.id for p in Book.objects.all()],
"test": lambda obj, value: obj.guest_author.promo_set.filter(
book=value
).exists(),
},
}
for filter_path, params in filters.items():
for value in params["values"]:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s"' % query_string)
# ensure link works
filtered_response = self.client.get(
"%s?%s" % (changelist_url, query_string)
)
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context["cl"].queryset.all():
self.assertTrue(params["test"](obj, value))
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
changelist_url = reverse("admin:admin_views_thing_changelist")
response = self.client.get(changelist_url, {"notarealfield": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get(changelist_url, {"notarealfield__whatever": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
response = self.client.get(
changelist_url, {"color__id__exact": "StringNotInteger!"}
)
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Regression test for #18530
response = self.client.get(changelist_url, {"pub_date__gte": "foo"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(
title="I Could Go Anywhere",
content="Versatile",
date=datetime.datetime.now(),
)
changelist_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, "4 articles")
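        # Both spellings of the boolean are accepted: "false"/"0" keep the
        # articles that have a section, "true"/"1" keep the one article
        # whose section is NULL.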
response = self.client.get(changelist_url, {"section__isnull": "false"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "0"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "true"})
self.assertContains(response, "1 article")
response = self.client.get(changelist_url, {"section__isnull": "1"})
self.assertContains(response, "1 article")
def test_logout_and_password_change_URLs(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(
response,
'<form id="logout-form" method="post" action="%s">'
% reverse("admin:logout"),
)
self.assertContains(
response, '<a href="%s">' % reverse("admin:password_change")
)
def test_named_group_field_choices_change_list(self):
"""
Ensures the admin changelist shows correct values in the relevant column
for rows corresponding to instances of a model in which a named group
has been used in the choices option of a field.
"""
link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,))
link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,))
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist table isn't showing the right human-readable values "
"set by a model field 'choices' option named group."
)
self.assertContains(
response,
'<a href="%s">Horizontal</a>' % link1,
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="%s">Vertical</a>' % link2,
msg_prefix=fail_msg,
html=True,
)
def test_named_group_field_choices_filter(self):
"""
Ensures the filter UI shows correctly when at least one named group has
been used in the choices option of a model field.
"""
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist filter isn't showing options contained inside a model "
"field 'choices' option named group."
)
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(
response,
'<a href="?surface__exact=x">Horizontal</a>',
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="?surface__exact=y">Vertical</a>',
msg_prefix=fail_msg,
html=True,
)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
response = self.client.get(reverse("admin:admin_views_post_changelist"))
self.assertContains(response, "icon-unknown.svg")
def test_display_decorator_with_boolean_and_empty_value(self):
msg = (
"The boolean and empty_value arguments to the @display decorator "
"are mutually exclusive."
)
with self.assertRaisesMessage(ValueError, msg):
class BookAdmin(admin.ModelAdmin):
@admin.display(boolean=True, empty_value="(Missing)")
def is_published(self, obj):
return obj.publish_date is not None
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("en-us"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "Choisir une heure")
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertContains(response, "Choisir une heure")
def test_jsi18n_with_context(self):
response = self.client.get(reverse("admin-extra-context:jsi18n"))
self.assertEqual(response.status_code, 200)
def test_jsi18n_format_fallback(self):
"""
The JavaScript i18n view doesn't return localized date/time formats
when the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="ru"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "%d.%m.%Y %H:%M:%S")
self.assertContains(response, "%Y-%m-%d %H:%M:%S")
def test_disallowed_filtering(self):
with self.assertLogs("django.security.DisallowedModelAdminLookup", "ERROR"):
response = self.client.get(
"%s?owner__email__startswith=fuzzy"
% reverse("admin:admin_views_album_changelist")
)
self.assertEqual(response.status_code, 400)
# Filters are allowed if explicitly included in list_filter
response = self.client.get(
"%s?color__value__startswith=red"
% reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"%s?color__value=red" % reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
# Filters should be allowed if they involve a local field without the
# need to allow them in list_filter or date_hierarchy.
response = self.client.get(
"%s?age__gt=30" % reverse("admin:admin_views_person_changelist")
)
self.assertEqual(response.status_code, 200)
e1 = Employee.objects.create(
name="Anonymous", gender=1, age=22, alive=True, code="123"
)
e2 = Employee.objects.create(
name="Visitor", gender=2, age=19, alive=True, code="124"
)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
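        # The changelist itself advertises the employee__person_ptr__exact
        # lookup (the automatic parent link of the Employee MTI model), so
        # following it should be allowed rather than rejected.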
response = self.client.get(reverse("admin:admin_views_workhour_changelist"))
self.assertContains(response, "employee__person_ptr__exact")
response = self.client.get(
"%s?employee__person_ptr__exact=%d"
% (reverse("admin:admin_views_workhour_changelist"), e1.pk)
)
self.assertEqual(response.status_code, 200)
def test_disallowed_to_field(self):
url = reverse("admin:admin_views_section_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "missing_field"})
self.assertEqual(response.status_code, 400)
        # Specifying a field that is not referred to by any other model
        # registered to this admin site should raise an exception.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(
reverse("admin:admin_views_section_changelist"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
# Primary key should always be allowed, even if the referenced model
# isn't registered.
response = self.client.get(
reverse("admin:admin_views_notreferenced_changelist"), {TO_FIELD_VAR: "id"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field referenced by another model through an m2m should be
# allowed.
response = self.client.get(
reverse("admin:admin_views_recipe_changelist"), {TO_FIELD_VAR: "rname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field referenced through a reverse m2m relationship
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_ingredient_changelist"), {TO_FIELD_VAR: "iname"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is not referred to by any other model directly
# registered to this admin site but registered through inheritance
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyparent_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is only referred to by an inline of a
# registered model should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyinline_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # #25622 - Specifying a field of a model only referred to by a generic
# relation should raise DisallowedModelAdminToField.
url = reverse("admin:admin_views_referencedbygenrel_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "object_id"})
self.assertEqual(response.status_code, 400)
# We also want to prevent the add, change, and delete views from
# leaking a disallowed field value.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(
reverse("admin:admin_views_section_add"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
section = Section.objects.create()
url = reverse("admin:admin_views_section_change", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
url = reverse("admin:admin_views_section_delete", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
def test_allowed_filtering_15103(self):
"""
        Regression test for ticket 15103 - filtering on fields defined in a
ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields
can break.
"""
# Filters should be allowed if they are defined on a ForeignKey
# pointing to this model.
url = "%s?leader__name=Palin&leader__age=27" % reverse(
"admin:admin_views_inquisition_changelist"
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
Regression test for ticket 20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get(
"%s?%s" % (reverse("admin:admin_views_actor_changelist"), IS_POPUP_VAR)
)
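        # Each result row in popup mode carries a data-popup-opener
        # attribute holding the (properly quoted) pk.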
self.assertContains(response, 'data-popup-opener="%s"' % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username="super")
user.set_unusable_password()
user.save()
self.client.force_login(user)
response = self.client.get(reverse("admin:index"))
self.assertNotContains(
response,
reverse("admin:password_change"),
msg_prefix=(
'The "change password" link should not be displayed if a user does not '
"have a usable password."
),
)
def test_change_view_with_show_delete_extra_context(self):
"""
The 'show_delete' context variable in the admin's change view controls
the display of the delete button.
"""
instance = UndeletableObject.objects.create(name="foo")
response = self.client.get(
reverse("admin:admin_views_undeletableobject_change", args=(instance.pk,))
)
self.assertNotContains(response, "deletelink")
def test_change_view_logs_m2m_field_changes(self):
"""Changes to ManyToManyFields are included in the object's history."""
pizza = ReadablePizza.objects.create(name="Cheese")
cheese = Topping.objects.create(name="cheese")
post_data = {"name": pizza.name, "toppings": [cheese.pk]}
response = self.client.post(
reverse("admin:admin_views_readablepizza_change", args=(pizza.pk,)),
post_data,
)
self.assertRedirects(
response, reverse("admin:admin_views_readablepizza_changelist")
)
pizza_ctype = ContentType.objects.get_for_model(
ReadablePizza, for_concrete_model=False
)
log = LogEntry.objects.filter(
content_type=pizza_ctype, object_id=pizza.pk
).first()
self.assertEqual(log.get_change_message(), "Changed Toppings.")
def test_allows_attributeerror_to_bubble_up(self):
"""
AttributeErrors are allowed to bubble when raised inside a change list
view. Requires a model to be created so there's something to display.
Refs: #16655, #18593, and #18747
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get(reverse("admin:admin_views_simple_changelist"))
def test_changelist_with_no_change_url(self):
"""
ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if url
for change_view is removed from get_urls (#20934).
"""
o = UnchangeableObject.objects.create()
response = self.client.get(
reverse("admin:admin_views_unchangeableobject_changelist")
)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(
response, '<th class="field-__str__">%s</th>' % o, html=True
)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = "/test_admin/admin/admin_views/"
confirm_good_url = reverse(
"admin:app_list", kwargs={"app_label": "admin_views"}
)
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", kwargs={"app_label": "this_should_fail"})
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", args=("admin_views2",))
def test_resolve_admin_views(self):
index_match = resolve("/test_admin/admin4/")
list_match = resolve("/test_admin/admin4/auth/user/")
self.assertIs(index_match.func.admin_site, customadmin.simple_site)
self.assertIsInstance(
list_match.func.model_admin, customadmin.CustomPwdTemplateUserAdmin
)
def test_adminsite_display_site_url(self):
"""
#13749 - Admin should display link to front-end site 'View site'
"""
url = reverse("admin:index")
response = self.client.get(url)
self.assertEqual(response.context["site_url"], "/my-site-url/")
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
def test_date_hierarchy_empty_queryset(self):
self.assertIs(Question.objects.exists(), False)
response = self.client.get(reverse("admin:admin_views_answer2_changelist"))
self.assertEqual(response.status_code, 200)
@override_settings(TIME_ZONE="America/Sao_Paulo", USE_TZ=True)
def test_date_hierarchy_timezone_dst(self):
# This datetime doesn't exist in this timezone due to DST.
for date in make_aware_datetimes(
datetime.datetime(2016, 10, 16, 15), "America/Sao_Paulo"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=16")
self.assertContains(response, "question__expires__month=10")
self.assertContains(response, "question__expires__year=2016")
@override_settings(TIME_ZONE="America/Los_Angeles", USE_TZ=True)
def test_date_hierarchy_local_date_differ_from_utc(self):
# This datetime is 2017-01-01 in UTC.
for date in make_aware_datetimes(
datetime.datetime(2016, 12, 31, 16), "America/Los_Angeles"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=31")
self.assertContains(response, "question__expires__month=12")
self.assertContains(response, "question__expires__year=2016")
def test_sortable_by_columns_subset(self):
expected_sortable_fields = ("date", "callable_year")
expected_not_sortable_fields = (
"content",
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
)
response = self.client.get(reverse("admin6:admin_views_article_changelist"))
for field_name in expected_sortable_fields:
self.assertContains(
response, '<th scope="col" class="sortable column-%s">' % field_name
)
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
def test_get_sortable_by_columns_subset(self):
response = self.client.get(reverse("admin6:admin_views_actor_changelist"))
self.assertContains(response, '<th scope="col" class="sortable column-age">')
self.assertContains(response, '<th scope="col" class="column-name">')
def test_sortable_by_no_column(self):
expected_not_sortable_fields = ("title", "book")
response = self.client.get(reverse("admin6:admin_views_chapter_changelist"))
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_get_sortable_by_no_column(self):
response = self.client.get(reverse("admin6:admin_views_color_changelist"))
self.assertContains(response, '<th scope="col" class="column-value">')
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_app_index_context(self):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
self.assertEqual(response.context["title"], "Admin_Views administration")
self.assertEqual(response.context["app_label"], "admin_views")
# Models are sorted alphabetically by default.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models))
def test_app_index_context_reordered(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
# Models are in reverse order.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models, reverse=True))
def test_change_view_subtitle_per_object(self):
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_view_subtitle_per_object(self):
viewuser = User.objects.create_user(
username="viewuser",
password="secret",
is_staff=True,
)
viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", Article._meta)),
)
self.client.force_login(viewuser)
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_formset_kwargs_can_be_overridden(self):
response = self.client.get(reverse("admin:admin_views_city_add"))
self.assertContains(response, "overridden_name")
def test_render_views_no_subtitle(self):
tests = [
reverse("admin:index"),
reverse("admin:password_change"),
reverse("admin:app_list", args=("admin_views",)),
reverse("admin:admin_views_article_delete", args=(self.a1.pk,)),
reverse("admin:admin_views_article_history", args=(self.a1.pk,)),
]
for url in tests:
with self.subTest(url=url):
with self.assertNoLogs("django.template", "DEBUG"):
self.client.get(url)
# Login must be after logout.
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:logout"))
self.client.get(reverse("admin:login"))
def test_render_delete_selected_confirmation_no_subtitle(self):
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": self.a1.pk,
}
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:admin_views_article_changelist"), post_data)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation."
"NumericPasswordValidator"
)
},
]
)
def test_password_change_helptext(self):
response = self.client.get(reverse("admin:password_change"))
self.assertContains(
response, '<div class="help" id="id_new_password1_helptext">'
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation." "NumericPasswordValidator"
)
},
],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
            # Put this app's and the shared tests' template dirs in DIRS to
            # take precedence over the admin's templates dir.
"DIRS": [
os.path.join(os.path.dirname(__file__), "templates"),
os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_custom_model_admin_templates(self):
# Test custom change list template with custom extra context
response = self.client.get(
reverse("admin:admin_views_customarticle_changelist")
)
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, "custom_admin/change_list.html")
# Test custom add form template
response = self.client.get(reverse("admin:admin_views_customarticle_add"))
self.assertTemplateUsed(response, "custom_admin/add_form.html")
# Add an article so we can test delete, change, and history views
post = self.client.post(
reverse("admin:admin_views_customarticle_add"),
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
},
)
self.assertRedirects(
post, reverse("admin:admin_views_customarticle_changelist")
)
self.assertEqual(CustomArticle.objects.count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get(
reverse("admin:admin_views_customarticle_change", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/change_form.html")
response = self.client.get(
reverse("admin:admin_views_customarticle_delete", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/delete_confirmation.html")
response = self.client.post(
reverse("admin:admin_views_customarticle_changelist"),
data={
"index": 0,
"action": ["delete_selected"],
"_selected_action": ["1"],
},
)
self.assertTemplateUsed(
response, "custom_admin/delete_selected_confirmation.html"
)
response = self.client.get(
reverse("admin:admin_views_customarticle_history", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/object_history.html")
# A custom popup response template may be specified by
# ModelAdmin.popup_response_template.
response = self.client.post(
reverse("admin:admin_views_customarticle_add") + "?%s=1" % IS_POPUP_VAR,
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
IS_POPUP_VAR: "1",
},
)
self.assertEqual(response.template_name, "custom_admin/popup_response.html")
def test_extended_bodyclass_template_change_form(self):
"""
The admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_change_password_template(self):
user = User.objects.get(username="super")
response = self.client.get(
reverse("admin:auth_user_password_change", args=(user.id,))
)
# The auth/user/change_password.html template uses super in the
# bodyclass block.
self.assertContains(response, "bodyclass_consistency_check ")
# When a site has multiple passwords in the browser's password manager,
# a browser pop up asks which user the new password is for. To prevent
# this, the username is added to the change password form.
self.assertContains(
response, '<input type="text" name="username" value="super" class="hidden">'
)
# help text for passwords has an id.
self.assertContains(
response,
'<div class="help" id="id_password1_helptext"><ul><li>'
"Your password can’t be too similar to your other personal information."
"</li><li>Your password can’t be entirely numeric.</li></ul></div>",
)
self.assertContains(
response,
'<div class="help" id="id_password2_helptext">'
"Enter the same password as before, for verification.</div>",
)
def test_extended_bodyclass_template_index(self):
"""
The admin/index.html template uses block.super in the bodyclass block.
"""
response = self.client.get(reverse("admin:index"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_change_list(self):
"""
The admin/change_list.html' template uses block.super
in the bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_login(self):
"""
The admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get(reverse("admin:login"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_confirmation(self):
"""
The admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get(reverse("admin:auth_group_delete", args=(group.id,)))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
The admin/delete_selected_confirmation.html template uses
        block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": group.id,
}
response = self.client.post(reverse("admin:auth_group_changelist"), post_data)
self.assertEqual(response.context["site_header"], "Django administration")
self.assertContains(response, "bodyclass_consistency_check ")
def test_filter_with_custom_template(self):
"""
A custom template can be used to render an admin filter.
"""
response = self.client.get(reverse("admin:admin_views_color2_changelist"))
self.assertTemplateUsed(response, "custom_filter_template.html")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_change_form_URL_has_correct_value(self):
"""
change_view has form_url in response.context
"""
response = self.client.get(
reverse(
"admin:admin_views_section_change",
args=(self.s1.pk,),
current_app=self.current_app,
)
)
self.assertIn(
"form_url", response.context, msg="form_url not present in response.context"
)
self.assertEqual(response.context["form_url"], "pony")
def test_initial_data_can_be_overridden(self):
"""
The behavior for setting initial form data can be overridden in the
ModelAdmin class. Usually, the initial value is set via the GET params.
"""
response = self.client.get(
reverse("admin:admin_views_restaurant_add", current_app=self.current_app),
{"name": "test_value"},
)
# this would be the usual behaviour
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behaviour
self.assertContains(response, 'value="overridden_value"')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_js_minified_only_if_debug_is_false(self):
"""
The minified versions of the JS files are only used when DEBUG is False.
"""
with override_settings(DEBUG=False):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertNotContains(response, "vendor/jquery/jquery.js")
self.assertContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
with override_settings(DEBUG=True):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "vendor/jquery/jquery.js")
self.assertNotContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
@override_settings(ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_as_duplication(self):
"""'save as' creates a new person"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)), post_data
)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
new_person = Person.objects.latest("id")
self.assertRedirects(
response, reverse("admin:admin_views_person_change", args=(new_person.pk,))
)
def test_save_as_continue_false(self):
"""
Saving a new object using "Save as new" redirects to the changelist
instead of the change view when ModelAdmin.save_as_continue=False.
"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
url = reverse(
"admin:admin_views_person_change",
args=(self.per1.pk,),
current_app=site2.name,
)
response = self.client.post(url, post_data)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
self.assertRedirects(
response,
reverse("admin:admin_views_person_changelist", current_app=site2.name),
)
def test_save_as_new_with_validation_errors(self):
"""
When you click "Save as new" and have a validation error,
you only see the "Save as new" button and not the other save buttons,
and that only the "Save as" button is visible.
"""
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)),
{
"_saveasnew": "",
"gender": "invalid",
"_addanother": "fail",
},
)
self.assertContains(response, "Please correct the errors below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "Child",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "_invalid",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "_invalid",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "Father",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post(
reverse("admin2:login"),
{
REDIRECT_FIELD_NAME: reverse("admin2:index"),
"username": "customform",
"password": "secret",
},
follow=True,
)
self.assertIsInstance(login, TemplateResponse)
self.assertContains(login, "custom form error")
self.assertContains(login, "path/to/media.css")
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/login.html")
self.assertContains(response, "Hello from a custom login template")
def test_custom_admin_site_logout_template(self):
response = self.client.post(reverse("admin2:logout"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/logout.html")
self.assertContains(response, "Hello from a custom logout template")
def test_custom_admin_site_index_view_and_template(self):
response = self.client.get(reverse("admin2:index"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/index.html")
self.assertContains(response, "Hello from a custom index template *bar*")
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/app_index.html")
self.assertContains(response, "Hello from a custom app_index template")
def test_custom_admin_site_password_change_template(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(
response, "Hello from a custom password change form template"
)
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(response, "eggs")
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get(reverse("admin2:password_change_done"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_done.html")
self.assertContains(
response, "Hello from a custom password change done template"
)
def test_custom_admin_site_view(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:my_view"))
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.force_login(self.superuser)
su = User.objects.get(username="super")
response = self.client.get(
reverse("admin4:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 200)
def get_perm(Model, codename):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model, for_concrete_model=False)
return Permission.objects.get(content_type=ct, codename=codename)
@override_settings(
ROOT_URLCONF="admin_views.urls",
# Test with the admin's documented list of required context processors.
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.nostaffuser = User.objects.create_user(
username="nostaff", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
another_section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
# Setup permissions, for our users who can add, change, and delete.
opts = Article._meta
# User who can view Articles
cls.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", opts))
)
# User who can add Articles
cls.adduser.user_permissions.add(
get_perm(Article, get_permission_codename("add", opts))
)
# User who can change Articles
cls.changeuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
cls.nostaffuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
# User who can delete Articles
cls.deleteuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(Section, get_permission_codename("delete", Section._meta))
)
# login POST dicts
cls.index_url = reverse("admin:index")
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super",
"password": "secret",
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "secret",
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "notsecret",
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "adduser",
"password": "secret",
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "changeuser",
"password": "secret",
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "deleteuser",
"password": "secret",
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "nostaff",
"password": "secret",
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "joepublic",
"password": "secret",
}
cls.viewuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "viewuser",
"password": "secret",
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"password": "secret",
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Test if user enters email address
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username="jondoe", password="secret", email="[email protected]")
new_user.save()
        # Check that login doesn't 500 when multiple users share an email address.
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# View User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.viewuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# A regular user should not be able to log in.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
self.assertFormError(
login.context["form"], "username", ["This field is required."]
)
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse("admin:login"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
def test_login_has_permission(self):
# A regular user should not be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.joepublic_login
)
self.assertContains(login, "permission denied")
# A user with permissions should be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.nostaff_login
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
# Staff users should be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"),
{
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "deleteuser",
"password": "secret",
},
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = "the-answer=42"
redirect_url = "%s?%s" % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
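# Pass the redirect target via the ?next= query string instead of the POST body.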
login = self.client.post(
"%s?%s" % (reverse("admin:login"), urlencode(new_next)), post_data
)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = "You are authenticated as {}"
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertNotContains(response, hint_template.format(""), status_code=200)
# Non-staff user should be shown the hint
self.client.force_login(self.nostaffuser)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertContains(
response, hint_template.format(self.nostaffuser.username), status_code=200
)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
# Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.post(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.post(reverse("admin:logout"))
# The add user may log in and POST to the add view, and is then redirected to the admin root.
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_link = '› <a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix=(
"User restricted to add permission is given link to change list view "
"in breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.post(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix=(
"Unrestricted user is not given link to change list view in "
"breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.post(reverse("admin:logout"))
# Refs #8509 - if a normal user is already logged in, logging in again
# as the superuser works without error.
self.client.force_login(self.joepublicuser)
# Make sure data still persists even if the user session expires.
self.client.force_login(self.superuser)
# Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
@mock.patch("django.contrib.admin.options.InlineModelAdmin.has_change_permission")
def test_add_view_with_view_only_inlines(self, has_change_permission):
"""User with add permission to a section but view-only for inlines."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("add", Section._meta))
)
self.client.force_login(self.viewuser)
# Valid POST creates a new section.
data = {
"name": "New obj",
"article_set-TOTAL_FORMS": 0,
"article_set-INITIAL_FORMS": 0,
}
response = self.client.post(reverse("admin:admin_views_section_add"), data)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(Section.objects.latest("id").name, data["name"])
# InlineModelAdmin.has_change_permission()'s obj argument is always
# None during object add.
self.assertEqual(
[obj for (request, obj), _ in has_change_permission.call_args_list],
[None, None],
)
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
change_dict = {
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
article_changelist_url = reverse("admin:admin_views_article_changelist")
# The add user should not be able to view the list of articles or
# change any of them.
self.client.force_login(self.adduser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view articles but not make changes.
self.client.force_login(self.viewuser)
response = self.client.get(article_changelist_url)
self.assertContains(
response,
"<title>Select article to view | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to view</h1>")
self.assertEqual(response.context["title"], "Select article to view")
response = self.client.get(article_change_url)
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<label>Extra form field:</label>")
self.assertContains(
response,
'<a href="/test_admin/admin/admin_views/article/" class="closelink">Close'
"</a>",
)
self.assertEqual(response.context["title"], "View article")
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.context["title"], "Select article to change")
self.assertContains(
response,
"<title>Select article to change | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to change</h1>")
response = self.client.get(article_change_url)
self.assertEqual(response.context["title"], "Change article")
self.assertContains(
response,
"<title>Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
)
# One error in the form should produce the singular error message;
# multiple errors should produce the plural one.
change_dict["title"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the error below.",
msg_prefix=(
"Singular error message not found in response to post with one error"
),
)
change_dict["content"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the errors below.",
msg_prefix=(
"Plural error message not found in response to post with multiple "
"errors"
),
)
self.client.post(reverse("admin:logout"))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
change_url_1 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
)
change_url_2 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
)
change_url_3 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
)
change_url_6 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
)
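# Per the assertions below: even ids may be changed, odd ids divisible
# by 3 may be viewed but not changed, and other odd ids are forbidden.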
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertRedirects(response, self.index_url)
response = self.client.get(change_url_3)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_3, {"name": "changed"})
self.assertEqual(response.status_code, 403)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=3).name,
"odd id mult 3",
)
response = self.client.get(change_url_6)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_6, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
)
self.assertRedirects(response, self.index_url)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_1, {"name": "changed"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertContains(response, "login-form")
response = self.client.get(change_url_2, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_2, {"name": "changed again"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_change_view_without_object_change_permission(self):
"""
The object should be read-only if the user has permission to view it
and change objects of that type but not to change the current object.
"""
change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
self.client.force_login(self.viewuser)
response = self.client.get(change_url)
self.assertEqual(response.context["title"], "View article")
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(
response,
'<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close'
"</a>",
)
def test_change_view_save_as_new(self):
"""
'Save as new' should raise PermissionDenied for users without the 'add'
permission.
"""
change_dict_save_as_new = {
"_saveasnew": "Save as new",
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
# Add user can perform "Save as new".
article_count = Article.objects.count()
self.client.force_login(self.adduser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), article_count + 1)
self.client.logout()
# Change user cannot perform "Save as new" (no 'add' permission).
article_count = Article.objects.count()
self.client.force_login(self.changeuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), article_count)
# User with both add and change permissions should be redirected to the
# change page for the newly created object.
article_count = Article.objects.count()
self.client.force_login(self.superuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(Article.objects.count(), article_count + 1)
new_article = Article.objects.latest("id")
self.assertRedirects(
post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
)
def test_change_view_with_view_only_inlines(self):
"""
User with change permission to a section but view-only for inlines.
"""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# Valid POST changes the name.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 3,
"article_set-INITIAL_FORMS": 3,
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
def test_change_view_with_view_only_last_inline(self):
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("view", Section._meta))
)
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# The last inline is not marked as empty.
self.assertContains(response, 'id="article_set-2"')
def test_change_view_with_view_and_add_inlines(self):
"""User has view and add permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
# Valid POST creates a new article.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-3-id": [""],
"article_set-3-title": ["A title"],
"article_set-3-content": ["Added content"],
"article_set-3-date_0": ["2008-3-18"],
"article_set-3-date_1": ["11:54:58"],
"article_set-3-section": [str(self.s1.pk)],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
self.assertEqual(Article.objects.count(), 4)
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
def test_change_view_with_view_and_delete_inlines(self):
"""User has view and delete permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
# Inline POST details are ignored without delete permission.
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 3)
# Deletion successful when delete permission is added.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", Article._meta))
)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 2)
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {"post": "yes"}
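# POSTing {"post": "yes"} confirms the deletion on the confirmation page.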
delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))
# add user should not be able to delete articles
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# view user should not be able to delete articles
self.client.force_login(self.viewuser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# Delete user can delete
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
# test response contains link to related Article
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
post = self.client.post(delete_url, delete_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, str(self.a1.pk))
def test_delete_view_with_no_default_permissions(self):
"""
The delete view allows users to delete collected objects without a
'delete' permission (ReadOnlyPizza.Meta.default_permissions is empty).
"""
pizza = ReadOnlyPizza.objects.create(name="Double Cheese")
delete_url = reverse("admin:admin_views_readonlypizza_delete", args=(pizza.pk,))
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/readonlypizza/%s/" % pizza.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Read only pizzas: 1</li>")
post = self.client.post(delete_url, {"post": "yes"})
self.assertRedirects(
post, reverse("admin:admin_views_readonlypizza_changelist")
)
self.assertEqual(ReadOnlyPizza.objects.count(), 0)
def test_delete_view_nonexistent_obj(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_article_delete", args=("nonexistent",))
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “nonexistent” doesn’t exist. Perhaps it was deleted?"],
)
def test_history_view(self):
"""History view should restrict access."""
# The add user should not be able to view the list of articles or
# change any of them.
self.client.force_login(self.adduser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view all items
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_history_view_bad_url(self):
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=("foo",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “foo” doesn’t exist. Perhaps it was deleted?"],
)
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
self.client.force_login(self.adduser)
# The user can't add sections yet, so they shouldn't see the "add section" link.
url = reverse("admin:admin_views_article_add")
add_link_text = "add_id_section"
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the user to add sections too. Now they can see the "add section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("add", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_conditionally_show_change_section_link(self):
"""
The foreign key widget should only show the "change related" button if
the user has permission to change that related item.
"""
def get_change_related(response):
return (
response.context["adminform"]
.form.fields["section"]
.widget.can_change_related
)
self.client.force_login(self.adduser)
# The user can't change sections yet, so they shouldn't see the
# "change section" link.
url = reverse("admin:admin_views_article_add")
change_link_text = "change_id_section"
response = self.client.get(url)
self.assertFalse(get_change_related(response))
self.assertNotContains(response, change_link_text)
# Allow the user to change sections too. Now they can see the
# "change section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("change", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_change_related(response))
self.assertContains(response, change_link_text)
def test_conditionally_show_delete_section_link(self):
"""
The foreign key widget should only show the "delete related" button if
the user has permission to delete that related item.
"""
def get_delete_related(response):
return (
response.context["adminform"]
.form.fields["sub_section"]
.widget.can_delete_related
)
self.client.force_login(self.adduser)
# The user can't delete sections yet, so they shouldn't see the
# "delete section" link.
url = reverse("admin:admin_views_article_add")
delete_link_text = "delete_id_sub_section"
response = self.client.get(url)
self.assertFalse(get_delete_related(response))
self.assertNotContains(response, delete_link_text)
# Allow the user to delete sections too. Now they can see the
# "delete section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("delete", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_delete_related(response))
self.assertContains(response, delete_link_text)
def test_disabled_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_active = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_staff = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_list_permissions(self):
"""
If a user has no module perms, the app list returns a 404.
"""
opts = Article._meta
change_user = User.objects.get(username="changeuser")
permission = get_perm(Article, get_permission_codename("change", opts))
self.client.force_login(self.changeuser)
# the user has no module permissions
change_user.user_permissions.remove(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 404)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk="foo")
shortcut_url = reverse("admin:view_on_site", args=(model_ctype.pk, obj.pk))
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
# Logged in? Redirect.
self.client.force_login(self.superuser)
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
# The domain may vary depending on whether the contrib.sites tests also run.
self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/")
def test_has_module_permission(self):
"""
has_module_permission() returns True for all users who have any
permission for that module (view, add, change, or delete), so that
the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
def test_overriding_has_module_permission(self):
"""
If has_module_permission() always returns False, the module shouldn't
be displayed on the admin index page for any users.
"""
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
self.assertContains(response, sections)
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(index_url)
self.assertNotContains(response, articles)
# The app list displays Sections but not Articles as the latter has
# ModelAdmin.has_module_permission() = False.
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
self.assertContains(response, sections)
self.assertNotContains(response, articles)
def test_post_save_message_no_forbidden_links_visible(self):
"""
Post-save message shouldn't contain a link to the change form if the
user doesn't have the change permission.
"""
self.client.force_login(self.adduser)
# Emulate Article creation for user with add-only permission.
post_data = {
"title": "Fun & games",
"content": "Some content",
"date_0": "2015-10-31",
"date_1": "16:35:00",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_article_add"), post_data, follow=True
)
self.assertContains(
response,
'<li class="success">The article “Fun & games” was added successfully.'
"</li>",
html=True,
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewProxyModelPermissionsTests(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
# Set up permissions.
opts = UserProxy._meta
cls.viewuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("view", opts))
)
cls.adduser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("add", opts))
)
cls.changeuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("change", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("delete", opts))
)
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(
username="user_proxy", password="secret"
)
def test_add(self):
self.client.force_login(self.adduser)
url = reverse("admin:admin_views_userproxy_add")
data = {
"username": "can_add",
"password": "secret",
"date_joined_0": "2019-01-15",
"date_joined_1": "16:59:10",
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username="can_add").exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_userproxy_changelist"))
self.assertContains(response, "<h1>Select user proxy to view</h1>")
response = self.client.get(
reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
)
self.assertContains(response, "<h1>View user proxy</h1>")
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
"password": self.user_proxy.password,
"username": self.user_proxy.username,
"date_joined_0": self.user_proxy.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": self.user_proxy.date_joined.strftime("%H:%M:%S"),
"first_name": "first_name",
}
url = reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(
response, reverse("admin:admin_views_userproxy_changelist")
)
self.assertEqual(
UserProxy.objects.get(pk=self.user_proxy.pk).first_name, "first_name"
)
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_userproxy_delete", args=(self.user_proxy.pk,))
response = self.client.post(url, {"post": "yes"}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.changeuser.user_permissions.add(
get_perm(Report, get_permission_codename("change", Report._meta))
)
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse("admin:index"))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.post(reverse("admin:logout"))
@skipUnlessDBFeature("can_defer_constraint_checks")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.v2 = Villain.objects.create(name="Sue")
cls.sv1 = SuperVillain.objects.create(name="Bob")
cls.pl1 = Plot.objects.create(
name="World Domination", team_leader=cls.v1, contact=cls.v2
)
cls.pl2 = Plot.objects.create(
name="World Peace", team_leader=cls.v2, contact=cls.v2
)
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
cls.pd1 = PlotDetails.objects.create(details="almost finished", plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(
location="underground bunker", villain=cls.v1
)
cls.sh2 = SecretHideout.objects.create(
location="floating castle", villain=cls.sv1
)
cls.ssh1 = SuperSecretHideout.objects.create(
location="super floating castle!", supervillain=cls.sv1
)
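# Create a reference cycle: each object points at the other's pk before it exists.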
cls.cy1 = CyclicOne.objects.create(name="I am recursive", two_id=1)
cls.cy2 = CyclicTwo.objects.create(name="I am recursive too", one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>'
% (
reverse("admin:admin_views_plot_change", args=(self.pl1.pk,)),
reverse("admin:admin_views_plotdetails_change", args=(self.pd1.pk,)),
)
)
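# Whitespace between the nested list items varies, hence a regex match.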
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertRegex(response.content.decode(), pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
)
response = self.client.get(
reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
)
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username="deleteuser")
delete_user.user_permissions.add(
get_perm(Plot, get_permission_codename("delete", Plot._meta))
)
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(self.pl1.pk,))
)
self.assertContains(
response,
"your account doesn't have permission to delete the following types of "
"objects",
)
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(
reverse("admin:admin_views_question_delete", args=(q.pk,))
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question="Why?")
Answer.objects.create(question=q, answer="Because.")
response = self.client.post(
reverse("admin:admin_views_question_delete", args=(q.pk,)), {"post": "yes"}
)
self.assertEqual(Question.objects.count(), 1)
self.assertContains(
response, "would require deleting the following protected related objects"
)
def test_restricted(self):
album = Album.objects.create(title="Amaryllis")
song = Song.objects.create(album=album, name="Unity")
response = self.client.get(
reverse("admin:admin_views_album_delete", args=(album.pk,))
)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse("admin:admin_views_song_change", args=(song.pk,)),
)
def test_post_delete_restricted(self):
album = Album.objects.create(title="Amaryllis")
Song.objects.create(album=album, name="Unity")
response = self.client.post(
reverse("admin:admin_views_album_delete", args=(album.pk,)),
{"post": "yes"},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
"admin:admin_views_plot_change", args=(self.pl1.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
"admin:admin_views_plot_change", args=(self.pl2.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)),
"<li>Secret hideout: floating castle",
"<li>Super secret hideout: super floating castle!",
]
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(
reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name="hott")
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
"admin:admin_views_funkytag_change", args=(tag.id,)
)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(plot.pk,))
)
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has a GenericForeignKey with a
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name="djangoproject")
tag = FunkyTag.objects.create(content_object=bookmark, name="django")
tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(
reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,))
)
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
response = self.client.get(
reverse("admin2:admin_views_book_delete", args=(book.pk,))
)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name="hott")
response = self.client.get(reverse("admin:admin_views_funkytag_changelist"))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
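# A primary key containing many characters that require URL quoting.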
cls.pk = (
"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "
r"""-_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
)
cls.m1 = ModelWithStringPrimaryKey.objects.create(string_pk=cls.pk)
content_type_pk = ContentType.objects.get_for_model(
ModelWithStringPrimaryKey
).pk
user_pk = cls.superuser.pk
LogEntry.objects.log_action(
user_pk,
content_type_pk,
cls.pk,
cls.pk,
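# action_flag=2 is LogEntry CHANGE.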
2,
change_message="Changed something",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_get_history_view(self):
"""
Retrieving the history for an object using the urlencoded form of its
primary key should work.
Refs #12349, #18550.
"""
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_history", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
self.assertContains(response, "Changed something")
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
def test_changelist_to_changeform_link(self):
"""
The link to the changeform of the object in the changelist should use
reverse() and be quoted.
"""
response = self.client.get(
reverse("admin:admin_views_modelwithstringprimarykey_changelist")
)
# This URL comes through reverse(), so it is URL-quoted and iri_to_uri()-encoded.
pk_final_url = escape(iri_to_uri(quote(self.pk)))
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", pk_final_url)
should_contain = '<th class="field-__str__"><a href="%s">%s</a></th>' % (
change_url,
escape(self.pk),
)
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"""
The link from the recent actions list referring to the changeform of
the object should be quoted.
"""
response = self.client.get(reverse("admin:index"))
link = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(quote(self.pk),)
)
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_deleteconfirmation_link(self):
""" "
The link from the delete confirmation page referring back to the
changeform of the object should be quoted.
"""
url = reverse(
"admin:admin_views_modelwithstringprimarykey_delete", args=(quote(self.pk),)
)
response = self.client.get(url)
# This URL comes through reverse(), so it is URL-quoted and iri_to_uri()-encoded.
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", escape(iri_to_uri(quote(self.pk))))
should_contain = '<a href="%s">%s</a>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add or is `add` should be visible"
add_model = ModelWithStringPrimaryKey.objects.create(
pk="i have something to add"
)
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
add_model2 = ModelWithStringPrimaryKey.objects.create(pk="add")
add_url = reverse("admin:admin_views_modelwithstringprimarykey_add")
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model2.pk),),
)
self.assertNotEqual(add_url, change_url)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(delete_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(history_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk="abc_123")
model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(model.pk),),
)
)
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse(
"admin:%s_modelwithstringprimarykey_change"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse(
"admin:%s_modelwithstringprimarykey_history"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
self.assertContains(
response, '<a href="%s" class="historylink"' % escape(expected_link)
)
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
reverse("admin:admin_views_modelwithstringprimarykey_add"),
{
"string_pk": "123/history",
"_continue": "1", # Save and continue editing
},
)
self.assertEqual(response.status_code, 302) # temporary redirect
self.assertIn("/123_2Fhistory/", response.headers["location"]) # PK is quoted
@override_settings(ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
def test_secure_view_shows_login_if_not_logged_in(self):
secure_url = reverse("secure_view")
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), secure_url)
)
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
The staff_member_required decorator works with an argument
(redirect_field_name).
"""
secure_url = "/test_admin/admin/secure-view2/"
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?myfield=%s" % (reverse("admin:login"), secure_url)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.b1 = Book.objects.create(name="Lærdommer")
cls.p1 = Promo.objects.create(name="<Promo for Lærdommer>", book=cls.b1)
cls.chap1 = Chapter.objects.create(
title="Norske bostaver æøå skaper problemer",
content="<p>Svært frustrerende med UnicodeDecodeErro</p>",
book=cls.b1,
)
cls.chap2 = Chapter.objects.create(
title="Kjærlighet",
content="<p>La kjærligheten til de lidende seire.</p>",
book=cls.b1,
)
cls.chap3 = Chapter.objects.create(
title="Kjærlighet", content="<p>Noe innhold</p>", book=cls.b1
)
cls.chap4 = ChapterXtra1.objects.create(
chap=cls.chap1, xtra="<Xtra(1) Norske bostaver æøå skaper problemer>"
)
cls.chap5 = ChapterXtra1.objects.create(
chap=cls.chap2, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap6 = ChapterXtra1.objects.create(
chap=cls.chap3, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap7 = ChapterXtra2.objects.create(
chap=cls.chap1, xtra="<Xtra(2) Norske bostaver æøå skaper problemer>"
)
cls.chap8 = ChapterXtra2.objects.create(
chap=cls.chap2, xtra="<Xtra(2) Kjærlighet>"
)
cls.chap9 = ChapterXtra2.objects.create(
chap=cls.chap3, xtra="<Xtra(2) Kjærlighet>"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_unicode_edit(self):
"""
POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": (
"<p>Svært frustrerende med UnicodeDecodeError</p>"
),
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": (
"<p>La kjærligheten til de lidende seire.</p>"
),
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(
reverse("admin:admin_views_book_change", args=(self.b1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
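# The chapter_set-* keys above follow the formset management-form protocol:
# TOTAL_FORMS counts every submitted form (3 bound + 3 blank extras here),
# INITIAL_FORMS counts the pre-existing rows (each of which must post its
# -id), and fully blank extra forms are skipped on save.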
def test_unicode_delete(self):
"""
The delete_view handles non-ASCII characters
"""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_book_delete", args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse("admin:admin_views_book_changelist"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_inheritance(self):
Podcast.objects.create(
name="This Week in Django", release_date=datetime.date.today()
)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso="en", name="English", english_name="English")
response = self.client.get(reverse("admin:admin_views_language_changelist"))
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get(reverse("admin:admin_views_person_changelist"))
# 2 inputs per object (the field and the hidden id field) = 6
# 4 management hidden fields = 4
# 4 action inputs (3 regular checkboxes, 1 checkbox to select all) = 4
# hidden "index" input in the actions bar = 1
# main form submit button = 1
# search field and search submit button = 2
# CSRF field = 2
# field to track 'select all' across paginated views = 1
# 6 + 4 + 4 + 1 + 1 + 2 + 2 + 1 = 21 inputs
self.assertContains(response, "<input", count=21)
# 1 select per object = 3 selects
self.assertContains(response, "<select", count=4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data, follow=True
)
self.assertEqual(len(response.context["messages"]), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": str(self.per3.pk),
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?gender__exact=1", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?q=john", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
"""
Non-field errors are displayed for each of the forms in the
changelist's formset.
"""
fd1 = FoodDelivery.objects.create(
reference="123", driver="bill", restaurant="thai"
)
fd2 = FoodDelivery.objects.create(
reference="456", driver="bill", restaurant="india"
)
fd3 = FoodDelivery.objects.create(
reference="789", driver="bill", restaurant="pizza"
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
1,
html=True,
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
2,
html=True,
)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
non_form_errors = response.context["cl"].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
self.assertEqual(
str(non_form_errors),
str(ErrorList(["Grace is not a Zombie"], error_class="nonform")),
)
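# Non-form errors are raised from the formset's own clean() rather than from
# any single form; passing error_class="nonform" makes ErrorList render them
# with a distinguishing CSS class, roughly:
#   <ul class="errorlist nonform"><li>Grace is not a Zombie</li></ul>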
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_category_changelist"), data
)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Pagination works for list_editable items.
"""
UnorderedObject.objects.create(id=1, name="Unordered object #1")
UnorderedObject.objects.create(id=2, name="Unordered object #2")
UnorderedObject.objects.create(id=3, name="Unordered object #3")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist")
)
self.assertContains(response, "Unordered object #3")
self.assertContains(response, "Unordered object #2")
self.assertNotContains(response, "Unordered object #1")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist") + "?p=2"
)
self.assertNotContains(response, "Unordered object #3")
self.assertNotContains(response, "Unordered object #2")
self.assertContains(response, "Unordered object #1")
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ["3"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
"_selected_action": ["1"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get(reverse("admin:admin_views_person_changelist"))
self.assertNotEqual(response.context["cl"].list_editable, ())
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?%s" % IS_POPUP_VAR
)
self.assertEqual(response.context["cl"].list_editable, ())
def test_pk_hidden_fields(self):
"""
Hidden pk fields aren't displayed in the table body; their
corresponding human-readable values are shown instead. The hidden pk
fields are rendered, but separately (outside the table) and only once.
"""
story1 = Story.objects.create(
title="The adventures of Guido", content="Once upon a time in Djangoland..."
)
story2 = Story.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
response = self.client.get(reverse("admin:admin_views_story_changelist"))
# Only one hidden field, in a separate place from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
"""Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(
title="The adventures of Guido",
content="Once upon a time in Djangoland...",
)
story2 = OtherStory.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
link1 = reverse("admin:admin_views_otherstory_change", args=(story1.pk,))
link2 = reverse("admin:admin_views_otherstory_change", args=(story2.pk,))
response = self.client.get(reverse("admin:admin_views_otherstory_changelist"))
# Only one hidden field, in a separate place from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id),
1,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id),
1,
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="John Doe", gender=1)
Person.objects.create(name='John O"Hara', gender=1)
Person.objects.create(name="John O'Hara", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text="Bar")
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text="Foo")
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text="Few")
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text="Bas")
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(
reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR
)
self.assertContains(response, "\n1 user\n")
self.assertContains(
response,
'<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR,
html=True,
)
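# TO_FIELD_VAR is the "_to_field" GET parameter used when a related widget
# addresses objects by a field other than the pk; the changelist re-emits it
# as a hidden input (asserted above) so that submitting the search form keeps
# targeting the same field.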
def test_exact_matches(self):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=ba"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=uido"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=Bob"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=20"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">6 total</a>)</span>""",
html=True,
)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True,
)
self.assertTrue(response.context["cl"].show_admin_actions)
def test_search_with_spaces(self):
url = reverse("admin:admin_views_person_changelist") + "?q=%s"
tests = [
('"John Doe"', 1),
("'John Doe'", 1),
("John Doe", 0),
('"John Doe" John', 1),
("'John Doe' John", 1),
("John Doe John", 0),
('"John Do"', 1),
("'John Do'", 1),
("'John O'Hara'", 0),
("'John O\\'Hara'", 1),
('"John O"Hara"', 0),
('"John O\\"Hara"', 1),
]
for search, hits in tests:
with self.subTest(search=search):
response = self.client.get(url % search)
self.assertContains(response, "\n%s person" % hits)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_inline(self):
"""
Inline models which inherit from a common parent are correctly handled.
"""
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
# test the add case
response = self.client.get(reverse("admin:admin_views_persona_add"))
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
# test the add case
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse("admin:admin_views_persona_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(
reverse("admin:admin_views_persona_change", args=(persona_id,))
)
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post(
reverse("admin:admin_views_persona_change", args=(persona_id,)), post_data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
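# FooAccount/BarAccount use multi-table inheritance, so each inline row is
# identified by the implicit parent-link OneToOneField
# ("accounts-0-account_ptr" above) instead of an "id" field when the edit
# payload is posted.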
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post(reverse("admin:admin_views_gadget_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
# Hit the page once to flush the messages out of the message queue
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
# Data is still not visible on the page
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
self.assertNotContains(response, "First Gadget")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_GET_parent_add(self):
"""
The add view of a parent model whose inline is not editable renders
successfully (smoke test).
"""
response = self.client.get(reverse("admin:admin_views_parent_add"))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.pks = [EmptyModel.objects.create().id for _ in range(3)]
def setUp(self):
self.client.force_login(self.superuser)
self.super_login = {
REDIRECT_FIELD_NAME: reverse("admin:index"),
"username": "super",
"password": "secret",
}
def test_changelist_view(self):
response = self.client.get(reverse("admin:admin_views_emptymodel_changelist"))
for i in self.pks:
if i > 1:
self.assertContains(response, "Primary key = %s" % i)
else:
self.assertNotContains(response, "Primary key = %s" % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name="person1", gender=1)
Person.objects.create(name="person2", gender=2)
changelist_url = reverse("admin:admin_views_person_changelist")
# 5 queries are expected: 1 for the session, 1 for the user,
# 2 for the counts and 1 for the objects on the page
with self.assertNumQueries(5):
resp = self.client.get(changelist_url)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"q": "not_in_name"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 0 selected")
self.assertEqual(resp.context["selection_note_all"], "All 0 selected")
with self.assertNumQueries(5):
extra = {"q": "person"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"gender__exact": "1"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 1 selected")
self.assertEqual(resp.context["selection_note_all"], "1 selected")
def test_change_view(self):
for i in self.pks:
url = reverse("admin:admin_views_emptymodel_change", args=(i,))
response = self.client.get(url, follow=True)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["empty model with ID “1” doesn’t exist. Perhaps it was deleted?"],
)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_coverletter_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = CoverLetter.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"Candidate, Best</a>” was added successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_shortmessage_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
sm = ShortMessage.objects.all()[0]
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = Telegram.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Urgent telegram</a>” was added successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
p = Paper.objects.all()[0]
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
url = reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"John Doe II</a>” was changed successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(cl.pk,)),
html=True,
)
# model has no __str__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
url = reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation comes from the default Model.__str__().
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
t = Telegram.objects.create(title="First Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_telegram_change", args=(t.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_change", args=(t.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Telegram without typo</a>” was changed successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(t.pk,)),
html=True,
)
# model has no __str__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_paper_change", args=(p.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_change", args=(p.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation comes from the default Model.__str__().
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_history_view_custom_qs(self):
"""
Custom querysets are considered for the admin history view.
"""
self.client.post(reverse("admin:login"), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get(
reverse("admin:admin_views_filteredmanager_changelist")
)
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(1,))
).status_code,
200,
)
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(2,))
).status_code,
200,
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
file1.write(b"a" * (2**21))
filename = file1.name
file1.close()
cls.gallery = Gallery.objects.create(name="Test Gallery")
cls.picture = Picture.objects.create(
name="Test Picture",
image=filename,
gallery=cls.gallery,
)
def setUp(self):
self.client.force_login(self.superuser)
def test_form_has_multipart_enctype(self):
response = self.client.get(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,))
)
self.assertIs(response.context["has_file_field"], True)
self.assertContains(response, MULTIPART_ENCTYPE)
def test_inline_file_upload_edit_validation_error_post(self):
"""
Inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": str(self.picture.id),
"pictures-0-gallery": str(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,)),
post_data,
)
self.assertContains(response, b"Currently")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.collector = Collector.objects.create(pk=1, name="John Fowles")
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
self.client.force_login(self.superuser)
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data["widget_set-0-name"] = "Widget 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# No file or image fields, no enctype on the forms
self.assertIs(response.context["has_file_field"], False)
self.assertNotContains(response, MULTIPART_ENCTYPE)
# Now resave that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"""
A model with an explicit autofield primary key can be saved as an inline.
"""
# First add a new inline
self.post_data["grommet_set-0-name"] = "Grommet 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""
An inline with an editable ordering field is updated correctly.
"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update(
{
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
}
)
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
@override_settings(ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
def setUp(self):
self.client.force_login(self.superuser)
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse("admin:admin_views_fabric_add"))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(
reverse("admin:admin_views_section_history", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.post(reverse("admin:logout"))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse("admin:password_change"))
self.assertIsNone(get_max_age(response))
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse("admin:password_change_done"))
self.assertIsNone(get_max_age(response))
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse("admin:jsi18n"))
self.assertIsNone(get_max_age(response))
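# These assertions lean on get_max_age(), a helper in this test module that
# parses the response's Cache-Control header: never_cache-decorated admin
# views emit "max-age=0" (so it returns 0), while views without the decorator
# send no max-age at all, hence the assertIsNone checks.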
@override_settings(ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_prepopulated_on(self):
response = self.client.get(reverse("admin:admin_views_prepopulatedpost_add"))
self.assertContains(response, ""id": "#id_slug"")
self.assertContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
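# The expected strings contain &quot; because the prepopulated-fields
# configuration is JSON serialized into an HTML data attribute, so every
# double quote is entity-escaped in the rendered page, roughly:
#   data-prepopulated-fields="[{&quot;id&quot;: &quot;#id_slug&quot;, ...}]"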
def test_prepopulated_off(self):
response = self.client.get(
reverse("admin:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, ""id": "#id_slug"")
self.assertNotContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertNotContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get(
reverse("admin:admin_views_prepopulatedpostlargeslug_add")
)
self.assertContains(response, ""maxLength": 1000") # instead of 1,000
def test_view_only_add_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'
which is present in the add view, even if the
ModelAdmin.has_change_permission() returns False.
"""
response = self.client.get(reverse("admin7:admin_views_prepopulatedpost_add"))
self.assertContains(response, "data-prepopulated-fields=")
self.assertContains(response, ""id": "#id_slug"")
def test_view_only_change_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'. That
doesn't break a view-only change view.
"""
response = self.client.get(
reverse("admin7:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, 'data-prepopulated-fields="[]"')
self.assertContains(response, '<div class="readonly">%s</div>' % self.p1.slug)
@override_settings(ROOT_URLCONF="admin_views.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
self.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def test_login_button_centered(self):
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
button = self.selenium.find_element(By.CSS_SELECTOR, ".submit-row input")
offset_left = button.get_property("offsetLeft")
offset_right = button.get_property("offsetParent").get_property(
"offsetWidth"
) - (offset_left + button.get_property("offsetWidth"))
# Use assertAlmostEqual to avoid pixel rounding errors.
self.assertAlmostEqual(offset_left, offset_right, delta=3)
def test_prepopulated_fields(self):
"""
The JavaScript-automated prepopulated fields work with the main form
and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_mainprepopulated_add")
)
self.wait_for(".select2")
# Main form ----------------------------------------------------------
self.selenium.find_element(By.ID, "id_pubdate").send_keys("2012-02-18")
self.select_option("#id_status", "option two")
self.selenium.find_element(By.ID, "id_name").send_keys(
" the mAin nÀMë and it's awεšomeıııİ"
)
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
slug3 = self.selenium.find_element(By.ID, "id_slug3").get_attribute("value")
self.assertEqual(slug1, "the-main-name-and-its-awesomeiiii-2012-02-18")
self.assertEqual(slug2, "option-two-the-main-name-and-its-awesomeiiii")
self.assertEqual(
slug3, "the-main-n\xe0m\xeb-and-its-aw\u03b5\u0161ome\u0131\u0131\u0131i"
)
# Stacked inlines with fieldsets -------------------------------------
# Initial inline
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-pubdate"
).send_keys("2011-12-17")
self.select_option("#id_relatedprepopulated_set-0-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-name"
).send_keys(" here is a sŤāÇkeð inline ! ")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "here-is-a-stacked-inline-2011-12-17")
self.assertEqual(slug2, "option-one-here-is-a-stacked-inline")
initial_select2_inputs = self.selenium.find_elements(
By.CLASS_NAME, "select2-selection"
)
# Inline formsets have empty/invisible forms.
# Only the 4 visible select2 inputs are initialized.
num_initial_select2_inputs = len(initial_select2_inputs)
self.assertEqual(num_initial_select2_inputs, 4)
# Add an inline
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
0
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 2,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-pubdate"
).send_keys("1999-01-25")
self.select_option("#id_relatedprepopulated_set-1-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-name"
).send_keys(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog "
"text... "
)
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug2"
).get_attribute("value")
# 50 characters maximum for slug1 field
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
# 60 characters maximum for slug2 field
self.assertEqual(
slug2, "option-two-now-you-have-another-stacked-inline-with-a-very-l"
)
# Tabular inlines ----------------------------------------------------
# Initial inline
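        # Inlines repeated for the same model get deduplicated formset
        # prefixes, so "relatedprepopulated_set-2" is the second inline.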
element = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-status"
)
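        # Scroll the tabular inline row into view before interacting with it.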
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-pubdate"
).send_keys("1234-12-07")
self.select_option("#id_relatedprepopulated_set-2-0-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-name"
).send_keys("And now, with a tÃbűlaŘ inline !!!")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "and-now-with-a-tabular-inline-1234-12-07")
self.assertEqual(slug2, "option-two-and-now-with-a-tabular-inline")
# Add an inline
# Button may be outside the browser frame.
element = self.selenium.find_elements(
By.LINK_TEXT, "Add another Related prepopulated"
)[1]
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
element.click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 4,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-pubdate"
).send_keys("1981-08-22")
self.select_option("#id_relatedprepopulated_set-2-1-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-name"
).send_keys(r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug2"
).get_attribute("value")
self.assertEqual(slug1, "tabular-inline-with-ignored-characters-1981-08-22")
self.assertEqual(slug2, "option-one-tabular-inline-with-ignored-characters")
# Add an inline without an initial inline.
# The button is outside of the browser frame.
self.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
2
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 6,
)
# Stacked Inlines without fieldsets ----------------------------------
# Initial inline.
row_id = "id_relatedprepopulated_set-4-0-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("2011-12-12")
self.select_option(f"#{row_id}status", "option one")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" sŤāÇkeð inline ! "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "stacked-inline-2011-12-12")
self.assertEqual(slug2, "option-one")
# Add inline.
self.selenium.find_elements(
By.LINK_TEXT,
"Add another Related prepopulated",
)[3].click()
row_id = "id_relatedprepopulated_set-4-1-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("1999-01-20")
self.select_option(f"#{row_id}status", "option two")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" now you haVe anöther sŤāÇkeð inline with a very loooong "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
self.assertEqual(slug2, "option-two")
# Save and check that everything is properly stored in the database
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.assertEqual(MainPrepopulated.objects.count(), 1)
MainPrepopulated.objects.get(
name=" the mAin nÀMë and it's awεšomeıııİ",
pubdate="2012-02-18",
status="option two",
slug1="the-main-name-and-its-awesomeiiii-2012-02-18",
slug2="option-two-the-main-name-and-its-awesomeiiii",
slug3="the-main-nàmë-and-its-awεšomeıııi",
)
self.assertEqual(RelatedPrepopulated.objects.count(), 6)
RelatedPrepopulated.objects.get(
name=" here is a sŤāÇkeð inline ! ",
pubdate="2011-12-17",
status="option one",
slug1="here-is-a-stacked-inline-2011-12-17",
slug2="option-one-here-is-a-stacked-inline",
)
RelatedPrepopulated.objects.get(
# 75 characters in name field
name=(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooo"
),
pubdate="1999-01-25",
status="option two",
slug1="now-you-have-another-stacked-inline-with-a-very-lo",
slug2="option-two-now-you-have-another-stacked-inline-with-a-very-l",
)
RelatedPrepopulated.objects.get(
name="And now, with a tÃbűlaŘ inline !!!",
pubdate="1234-12-07",
status="option two",
slug1="and-now-with-a-tabular-inline-1234-12-07",
slug2="option-two-and-now-with-a-tabular-inline",
)
RelatedPrepopulated.objects.get(
name=r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate="1981-08-22",
status="option one",
slug1="tabular-inline-with-ignored-characters-1981-08-22",
slug2="option-one-tabular-inline-with-ignored-characters",
)
def test_populate_existing_object(self):
"""
The prepopulation works for existing objects too, as long as
the original field is empty (#19082).
"""
from selenium.webdriver.common.by import By
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=" this is the mAin nÀMë",
pubdate="2012-02-18",
status="option two",
slug1="",
slug2="",
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
object_url = self.live_server_url + reverse(
"admin:admin_views_mainprepopulated_change", args=(item.id,)
)
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" the best")
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
# Save the object
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" hello")
        # The slugs didn't change since they were originally not empty.
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
def test_collapsible_fieldset(self):
"""
        The 'collapse' class in a fieldset definition allows showing/hiding
        the appropriate field section.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_add")
)
self.assertFalse(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
self.assertTrue(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "fieldsetcollapser0").text, "Hide"
)
def test_selectbox_height_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_pizza_add")
self.selenium.get(url)
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
filter_box = self.selenium.find_element(By.ID, "id_toppings_filter")
from_box = self.selenium.find_element(By.ID, "id_toppings_from")
to_box = self.selenium.find_element(By.ID, "id_toppings_to")
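        # Once the fieldset is expanded, the "chosen" box should match the
        # combined height of the filter input and the "available" box.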
self.assertEqual(
to_box.get_property("offsetHeight"),
(
filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_selectbox_height_not_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_question_add")
self.selenium.get(url)
filter_box = self.selenium.find_element(By.ID, "id_related_questions_filter")
from_box = self.selenium.find_element(By.ID, "id_related_questions_from")
to_box = self.selenium.find_element(By.ID, "id_related_questions_to")
self.assertEqual(
to_box.get_property("offsetHeight"),
(
filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
from selenium.webdriver.common.by import By
# First form field has a single widget
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_picture_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_name"),
)
# First form field has a MultiWidget
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_reservation_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_start_date_0"),
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)
def test_list_editable_popups(self):
"""
list_editable foreign keys have add/change popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
s1 = Section.objects.create(name="Test section")
Article.objects.create(
title="foo",
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=s1,
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_changelist")
)
# Change popup
self.selenium.find_element(By.ID, "change_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Change section")
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.clear()
name_input.send_keys("<i>edited section</i>")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Hide sidebar.
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "<i>edited section</i>")
# Rendered select2 input.
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\n<i>edited section</i>")
# Add popup
self.selenium.find_element(By.ID, "add_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Add section")
self.selenium.find_element(By.ID, "id_name").send_keys("new section")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "new section")
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\nnew section")
def test_inline_uuid_pk_edit_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "change_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(select.first_selected_option.text, str(parent.id))
self.assertEqual(
select.first_selected_option.get_attribute("value"), str(parent.id)
)
def test_inline_uuid_pk_add_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_views_relatedwithuuidpkmodel_add")
)
self.selenium.find_element(By.ID, "add_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
uuid_id = str(ParentWithUUIDPK.objects.first().id)
self.assertEqual(select.first_selected_option.text, uuid_id)
self.assertEqual(select.first_selected_option.get_attribute("value"), uuid_id)
def test_inline_uuid_pk_delete_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Yes, I’m sure"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(ParentWithUUIDPK.objects.count(), 0)
self.assertEqual(select.first_selected_option.text, "---------")
self.assertEqual(select.first_selected_option.get_attribute("value"), "")
def test_inline_with_popup_cancel_delete(self):
"""Clicking ""No, take me back" on a delete popup closes the window."""
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//a[text()="No, take me back"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertEqual(len(self.selenium.window_handles), 1)
def test_list_editable_raw_id_fields(self):
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
parent2 = ParentWithUUIDPK.objects.create(title="test2")
RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_changelist",
current_app=site2.name,
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "lookup_id_form-0-parent").click()
self.wait_for_and_switch_to_popup()
# Select "parent2" in the popup.
self.selenium.find_element(By.LINK_TEXT, str(parent2.pk)).click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# The newly selected pk should appear in the raw id input.
value = self.selenium.find_element(By.ID, "id_form-0-parent").get_attribute(
"value"
)
self.assertEqual(value, str(parent2.pk))
def test_input_element_font(self):
"""
Browsers' default stylesheets override the font of inputs. The admin
adds additional CSS to handle this.
"""
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
element = self.selenium.find_element(By.ID, "id_username")
        # Some browsers quote the fonts, some don't.
fonts = [
font.strip().strip('"')
for font in element.value_of_css_property("font-family").split(",")
]
self.assertEqual(
fonts,
[
"-apple-system",
"BlinkMacSystemFont",
"Segoe UI",
"system-ui",
"Roboto",
"Helvetica Neue",
"Arial",
"sans-serif",
"Apple Color Emoji",
"Segoe UI Emoji",
"Segoe UI Symbol",
"Noto Color Emoji",
],
)
def test_search_input_filtered_page(self):
from selenium.webdriver.common.by import By
Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="Grace Hopper", gender=1, alive=False)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
person_url = reverse("admin:admin_views_person_changelist") + "?q=Gui"
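        # Even on a filtered changelist, the search bar must keep a usable
        # width (more than 50 pixels).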
self.selenium.get(self.live_server_url + person_url)
self.assertGreater(
self.selenium.find_element(By.ID, "searchbar").rect["width"],
50,
)
def test_related_popup_index(self):
"""
Create a chain of 'self' related objects via popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
base_window = self.selenium.current_window_handle
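        # Each "add another" click opens a new popup window; keep the handles
        # so the chain can be saved from the innermost popup outwards.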
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
popup_window_test = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
popup_window_test2 = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.selenium.find_element(By.ID, "id_title").send_keys("test3")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test2)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test3").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test2").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(base_window)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
def test_related_popup_incorrect_close(self):
"""
        Clean up child popups when closing a parent popup.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
test_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
test2_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.assertEqual(len(self.selenium.window_handles), 4)
self.selenium.switch_to.window(test2_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 2, 1)
self.assertEqual(len(self.selenium.window_handles), 2)
# Close final popup to clean up test.
self.selenium.switch_to.window(test_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 1, 1)
self.selenium.switch_to.window(self.selenium.window_handles[-1])
def test_hidden_fields_small_window(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
self.selenium.get(self.live_server_url + reverse("admin:admin_views_story_add"))
field_title = self.selenium.find_element(By.CLASS_NAME, "field-title")
current_size = self.selenium.get_window_size()
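        # Resize within try/finally so the original window size is always
        # restored for subsequent tests.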
try:
self.selenium.set_window_size(1024, 768)
self.assertIs(field_title.is_displayed(), False)
self.selenium.set_window_size(767, 575)
self.assertIs(field_title.is_displayed(), False)
finally:
self.selenium.set_window_size(current_size["width"], current_size["height"])
def test_updating_related_objects_updates_fk_selects(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
born_country_select_id = "id_born_country"
living_country_select_id = "id_living_country"
favorite_country_to_vacation_select_id = "id_favorite_country_to_vacation"
continent_select_id = "id_continent"
def _get_HTML_inside_element_by_id(id_):
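            """Return the innerHTML of the element with the given id."""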
return self.selenium.find_element(By.ID, id_).get_attribute("innerHTML")
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_traveler_add")
self.selenium.get(self.live_server_url + add_url)
# Add new Country from the born_country select.
self.selenium.find_element(By.ID, f"add_{born_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Argentina")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("South America")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
""",
)
# Argentina won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add new Country from the living_country select.
self.selenium.find_element(By.ID, f"add_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Spain")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Europe")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Spain</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Spain</option>
""",
)
# Spain won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Edit second Country created from living_country select.
favorite_select = Select(
self.selenium.find_element(By.ID, living_country_select_id)
)
favorite_select.select_by_visible_text("Spain")
self.selenium.find_element(By.ID, f"change_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.clear()
favorite_name_input.send_keys("Italy")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Italy</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Italy</option>
""",
)
# favorite_country_to_vacation field has no options.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add a new Asian country.
self.selenium.find_element(
By.ID, f"add_{favorite_country_to_vacation_select_id}"
).click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.send_keys("Qatar")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Asia")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Submit the new Traveler.
self.selenium.find_element(By.CSS_SELECTOR, '[name="_save"]').click()
traveler = Traveler.objects.get()
self.assertEqual(traveler.born_country.name, "Argentina")
self.assertEqual(traveler.living_country.name, "Italy")
self.assertEqual(traveler.favorite_country_to_vacation.name, "Qatar")
def test_redirect_on_add_view_add_another_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 2")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 2)
def test_redirect_on_add_view_continue_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and continue editing"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input_value = name_input.get_attribute("value")
self.assertEqual(name_input_value, "Test section 1")
@override_settings(ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(AdminFieldExtractionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_readonly_get(self):
response = self.client.get(reverse("admin:admin_views_post_add"))
self.assertNotContains(response, 'name="posted"')
        # 3 fields + 2 submit buttons + 5 inline management form fields + 2
        # hidden fields for inlines + 1 field for the inline + 2 empty form
        # fields + 1 logout form.
self.assertContains(response, "<input", count=17)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response, "<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Multiline text in a readonly field gets <br> tags
self.assertContains(response, "Multiline<br>test<br>string")
self.assertContains(
response,
'<div class="readonly">Multiline<br>html<br>content</div>',
html=True,
)
self.assertContains(response, "InlineMultiline<br>test<br>string")
self.assertContains(
response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)),
)
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<div class="help"', 3)
self.assertContains(
response,
'<div class="help" id="id_title_helptext">Some help text for the title '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help" id="id_content_helptext">Some help text for the content '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help">Some help text for the date (with Unicode ŠĐĆŽćžšđ)'
"</div>",
html=True,
)
p = Post.objects.create(
title="I worked on readonly_fields", content="Its good stuff"
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_text_field(self):
p = Post.objects.create(
title="Readonly test",
content="test",
readonly_content="test\r\n\r\ntest\r\n\r\ntest\r\n\r\ntest",
)
Link.objects.create(
url="http://www.djangoproject.com",
post=p,
readonly_link_content="test\r\nlink",
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
# Checking readonly field.
self.assertContains(response, "test<br><br>test<br><br>test<br><br>test")
# Checking readonly field in inline.
self.assertContains(response, "test<br>link")
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
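        # Submitted values for the read-only "posted" field must be ignored.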
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by("-id")[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse("admin:admin_views_pizza_add"))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(
reverse("admin2:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
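        # The readonly field should render the display label for None
        # ("No opinion") instead of the raw value.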
response = self.client.get(
reverse("admin:admin_views_choice_change", args=(choice.pk,))
)
self.assertContains(
response, '<div class="readonly">No opinion</div>', html=True
)
def _test_readonly_foreignkey_links(self, admin_site):
"""
ForeignKey readonly fields render as links if the target model is
registered in admin.
"""
chapter = Chapter.objects.create(
title="Chapter 1",
content="content",
book=Book.objects.create(name="Book 1"),
)
language = Language.objects.create(iso="_40", name="Test")
obj = ReadOnlyRelatedField.objects.create(
chapter=chapter,
language=language,
user=self.superuser,
)
response = self.client.get(
reverse(
f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,)
),
)
# Related ForeignKey object registered in admin.
user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
f"{admin_site}:admin_views_language_change",
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(
response, '<div class="readonly">Chapter 1</div>', html=True
)
def test_readonly_foreignkey_links_default_admin_site(self):
self._test_readonly_foreignkey_links("admin")
def test_readonly_foreignkey_links_custom_admin_site(self):
self._test_readonly_foreignkey_links("namespaced_admin")
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(reverse("admin:admin_views_topping_add"))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(
reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
)
self.assertContains(response, "<label>Toppings:</label>", html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name="Adam")
pl = Plot.objects.create(name="Test Plot", team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details="Brand New Plot", plot=pl)
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
pd_url = reverse("admin:admin_views_plotdetails_change", args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
self.assertEqual(field.contents(), "-") # default empty value
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(
reverse("admin:admin_views_fieldoverridepost_change", args=(p.pk,))
)
self.assertContains(
response,
'<div class="help">Overridden help text for the date</div>',
html=True,
)
self.assertContains(
response,
'<label for="id_public">Overridden public label:</label>',
html=True,
)
self.assertNotContains(
response, "Some help text for the date (with Unicode ŠĐĆŽćžšđ)"
)
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name="<a>evil</a>")
response = self.client.get(
reverse("admin:admin_views_section_change", args=(section.pk,))
)
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name="Americano")
url = reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
with self.settings(LANGUAGE_CODE="fr"):
response = self.client.get(url)
self.assertContains(response, "<label>Toppings\u00A0:</label>", html=True)
@override_settings(ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get(reverse("admin:admin_views_stumpjoke_add"))
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)
@override_settings(ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
Inquisition.objects.create(expected=False, leader=actor, country="Spain")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content
)
self.assertTrue(m) # Got a match
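        # Ampersands are HTML-escaped in the rendered href, so unescape them.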
        popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
        # lookup query string; in the model, the inquisition field has a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content
)
self.assertTrue(m) # Got a match
        popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
        # lookup query string; in the model, the defendant0 field has a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content
)
self.assertTrue(m) # Got a match
        popup_url = m[1].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
        # lookup query string; in the model, the defendant1 field has a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
def test_list_display_method_same_name_as_reverse_accessor(self):
"""
Should be able to use a ModelAdmin method in list_display that has the
same name as a reverse model field ("sketch" in this case).
"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
response = self.client.get(reverse("admin:admin_views_inquisition_changelist"))
self.assertContains(response, "list-display-sketch")
@override_settings(ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
},
)
new_user = User.objects.get(username="newuser")
self.assertRedirects(
response, reverse("admin:auth_user_change", args=(new_user.pk,))
)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_continue": "1",
},
)
new_user = User.objects.get(username="newuser")
new_user_url = reverse("admin:auth_user_change", args=(new_user.pk,))
self.assertRedirects(response, new_user_url, fetch_redirect_response=False)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
response = self.client.get(new_user_url)
self.assertContains(
response,
'<li class="success">The user “<a href="%s">'
"%s</a>” was added successfully. You may edit it again below.</li>"
% (new_user_url, new_user),
html=True,
)
def test_password_mismatch(self):
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "mismatch",
},
)
self.assertEqual(response.status_code, 200)
self.assertFormError(response.context["adminform"], "password1", [])
self.assertFormError(
response.context["adminform"],
"password2",
["The two password fields didn’t match."],
)
def test_user_fk_add_popup(self):
"""
User addition through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(response, reverse("admin:auth_user_add"))
self.assertContains(
response,
'class="related-widget-wrapper-link add-related" id="add_id_owner"',
)
response = self.client.get(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR
)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR, data, follow=True
)
self.assertContains(response, ""obj": "newuser"")
def test_user_fk_change_popup(self):
"""
User change through a FK popup should return the appropriate JavaScript
response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_change", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_change", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""obj": "newuser"")
self.assertContains(response, ""action": "change"")
def test_user_fk_delete_popup(self):
"""
User deletion through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_delete", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_delete", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = {
"post": "yes",
IS_POPUP_VAR: "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""action": "delete"")
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_addanother": "1",
},
)
new_user = User.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_user_add"))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
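        # Backends using savepoints wrap the nested atomic block in
        # SAVEPOINT/RELEASE SAVEPOINT, adding two queries to the count.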
expected_num_queries = 10 if connection.features.uses_savepoints else 8
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_user_change", args=(u.pk,)))
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get(
reverse("admin3:auth_user_password_change", args=(u.pk,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["form_url"], "pony")
@override_settings(ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(
reverse("admin:auth_group_add"),
{
"name": "newgroup",
},
)
Group.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_group_changelist"))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
expected_num_queries = 8 if connection.features.uses_savepoints else 6
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_group_change", args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_field_prefix_css_classes(self):
"""
Fields have a CSS class name with a 'field-' prefix.
"""
response = self.client.get(reverse("admin:admin_views_post_add"))
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
CSS class names are used for each app and model on the admin index
pages (#17050).
"""
# General index page
response = self.client.get(reverse("admin:index"))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
        Ensure app and model tags are correctly read by the change_form template.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
        Ensure app and model tags are correctly read by the change_list template.
"""
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
        Ensure app and model tags are correctly read by the delete_confirmation
        template.
"""
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
        Ensure app and model tags are correctly read by the app_index template.
"""
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
        Ensure app and model tags are correctly read by the
        delete_selected_confirmation template.
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_section_changelist"), action_data
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their
class attribute.
"""
Podcast.objects.create(name="Django Dose", release_date=datetime.date.today())
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertContains(response, '<th class="field-name">')
self.assertContains(response, '<td class="field-release_date nowrap">')
self.assertContains(response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
class AdminDocsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_tags(self):
response = self.client.get(reverse("django-admindocs-tags"))
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(
response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#built_in-autoescape">autoescape</a></li>',
html=True,
)
# An app tag exists in both the index and detail
self.assertContains(
response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>',
html=True,
)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(
response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#admin_list-admin_actions">admin_actions</a></li>',
html=True,
)
def test_filters(self):
response = self.client.get(reverse("django-admindocs-filters"))
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(
response, '<li><a href="#built_in-add">add</a></li>', html=True
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class ValidXHTMLTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_lang_name_present(self):
with translation.override(None):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(ROOT_URLCONF="admin_views.urls", USE_THOUSAND_SEPARATOR=True)
class DateHierarchyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
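    # These tests assume a Podcast changelist configured with a date
    # hierarchy, roughly (hypothetical sketch; the real admin lives in this
    # test app's admin module):
    #
    #     class PodcastAdmin(admin.ModelAdmin):
    #         date_hierarchy = "release_date"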
def assert_non_localized_year(self, response, year):
"""
The year is not localized with USE_THOUSAND_SEPARATOR (#15234).
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % date.year)
def assert_contains_month_link(self, response, date):
self.assertContains(
response,
'?release_date__month=%d&release_date__year=%d"'
% (date.month, date.year),
)
def assert_contains_day_link(self, response, date):
self.assertContains(
response,
"?release_date__day=%d&"
'release_date__month=%d&release_date__year=%d"'
% (date.day, date.month, date.year),
)
def test_empty(self):
"""
No date hierarchy links display with empty changelist.
"""
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertNotContains(response, "release_date__year=")
self.assertNotContains(response, "release_date__month=")
self.assertNotContains(response, "release_date__day=")
def test_single(self):
"""
Single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
day-level links appear for changelist within single month.
"""
DATES = (
datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
month-level links appear for changelist within single year.
"""
DATES = (
datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, "release_date__day=")
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
year-level links appear for year-spanning changelist.
"""
DATES = (
datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
# no day/month-level links
self.assertNotContains(response, "release_date__day=")
self.assertNotContains(response, "release_date__month=")
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = "%s?release_date__year=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = "%s?release_date__year=%d&release_date__month=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
date.month,
)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
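    # date_hierarchy can also traverse relations (e.g. "question__posted" on
    # the Answer admin), which the next test exercises.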
def test_related_field(self):
questions_data = (
# (posted data, number of answers),
(datetime.date(2001, 1, 30), 0),
(datetime.date(2003, 3, 15), 1),
(datetime.date(2005, 5, 3), 2),
)
for date, answer_count in questions_data:
question = Question.objects.create(posted=date)
for i in range(answer_count):
question.answer_set.create()
response = self.client.get(reverse("admin:admin_views_answer_changelist"))
for date, answer_count in questions_data:
link = '?question__posted__year=%d"' % date.year
if answer_count > 0:
self.assertContains(response, link)
else:
self.assertNotContains(response, link)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
One can easily customize the way related objects are saved.
Refs #16115.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
"child_set-TOTAL_FORMS": "3",
"child_set-INITIAL_FORMS": "0",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-1-name": "Catherine",
}
self.client.post(reverse("admin:admin_views_parent_add"), post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name="Josh Stone")
paul = Child.objects.create(parent=parent, name="Paul")
catherine = Child.objects.create(parent=parent, name="Catherine")
post = {
"child_set-TOTAL_FORMS": "5",
"child_set-INITIAL_FORMS": "2",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-0-id": paul.id,
"child_set-1-name": "Catherine",
"child_set-1-id": catherine.id,
}
self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.id,)), post
)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name="Josh Rock")
Child.objects.create(parent=parent, name="Paul")
Child.objects.create(parent=parent, name="Catherine")
post = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": parent.id,
"form-0-name": "Josh Stone",
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_parent_changelist"), post)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def test_logout(self):
self.client.force_login(self.superuser)
response = self.client.post(reverse("admin:logout"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "registration/logged_out.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:logout"))
self.assertFalse(response.context["has_permission"])
        self.assertNotContains(
            response, "user-tools"
        )  # The user-tools div shouldn't be visible.
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.post(reverse("admin:logout"))
        self.assertEqual(
            response.status_code, 302
        )  # We should be redirected to the login page.
        # Follow the redirect and test the results.
response = self.client.post(reverse("admin:logout"), follow=True)
self.assertContains(
response,
'<input type="hidden" name="next" value="%s">' % reverse("admin:index"),
)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:login"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
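    # The UserMessenger actions posted to below are presumably of this form
    # (hypothetical sketch; the real actions live in this test app's admin
    # module):
    #
    #     @admin.action(description="Message debug")
    #     def message_debug(modeladmin, request, queryset):
    #         modeladmin.message_user(request, "Test debug", level=messages.DEBUG)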
def send_message(self, level):
"""
        Helper that posts to the dummy message actions and asserts that a
        message with the given level appears in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_%s" % level,
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="%s">Test %s</li>' % (level, level), html=True
)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message("debug")
def test_message_info(self):
self.send_message("info")
def test_message_success(self):
self.send_message("success")
def test_message_warning(self):
self.send_message("warning")
def test_message_error(self):
self.send_message("error")
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_extra_tags",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="extra_tag info">Test tags</li>', html=True
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
admin_site = site
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
def setUp(self):
self.client.force_login(self.superuser)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are equal despite the ordering
of their querystring. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
for parsed_qs in [parsed_qs1, parsed_qs2]:
if "_changelist_filters" in parsed_qs:
changelist_filters = parsed_qs["_changelist_filters"]
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs["_changelist_filters"] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
change_user_url = reverse(
"admin:auth_user_change", args=(self.joepublicuser.pk,)
)
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D1%26is_superuser__exact%3D1".format(change_user_url),
)
# Ignore scheme and host.
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Ignore ordering of querystring.
self.assertURLEqual(
"{}?is_staff__exact=0&is_superuser__exact=0".format(
reverse("admin:auth_user_changelist")
),
"{}?is_superuser__exact=0&is_staff__exact=0".format(
reverse("admin:auth_user_changelist")
),
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_superuser__exact%3D0%26is_staff__exact%3D0".format(change_user_url),
)
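    # The admin preserves changelist filters by URL-encoding the changelist
    # querystring into a single _changelist_filters parameter appended to
    # add/change/delete/history URLs; the helpers below build such URLs.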
def get_changelist_filters(self):
return {
"is_superuser__exact": 0,
"is_staff__exact": 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
return urlencode(
{"_changelist_filters": self.get_changelist_filters_querystring()}
)
def get_sample_user_id(self):
return self.joepublicuser.pk
def get_changelist_url(self):
return "%s?%s" % (
reverse("admin:auth_user_changelist", current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self, add_preserved_filters=True):
url = reverse("admin:auth_user_add", current_app=self.admin_site.name)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_change_url(self, user_id=None, add_preserved_filters=True):
if user_id is None:
user_id = self.get_sample_user_id()
url = reverse(
"admin:auth_user_change", args=(user_id,), current_app=self.admin_site.name
)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_history",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_delete",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">{}</a>'.format(self.joepublicuser.username),
response.content.decode(),
)
self.assertURLEqual(detail_link[1], self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>', response.content.decode()
)
self.assertURLEqual(history_link[1], self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>', response.content.decode()
)
self.assertURLEqual(delete_link[1], self.get_delete_url())
# Test redirect on "Save".
post_data = {
"username": "joepublic",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
}
post_data["_save"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_changelist_url())
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["_continue"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_change_url())
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["_addanother"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_change_view_without_preserved_filters(self):
response = self.client.get(self.get_change_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
post_data = {
"username": "dummy",
"password1": "test",
"password2": "test",
}
# Test redirect on "Save".
post_data["_save"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy").pk)
)
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["username"] = "dummy2"
post_data["_continue"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy2").pk)
)
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["username"] = "dummy3"
post_data["_addanother"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_add_view_without_preserved_filters(self):
response = self.client.get(self.get_add_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {"post": "yes"})
self.assertRedirects(response, self.get_changelist_url())
def test_url_prefix(self):
context = {
"preserved_filters": self.get_preserved_filters_querystring(),
"opts": User._meta,
}
prefixes = ("", "/prefix/", "/後台/")
for prefix in prefixes:
with self.subTest(prefix=prefix), override_script_prefix(prefix):
url = reverse(
"admin:auth_user_changelist", current_app=self.admin_site.name
)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
"""#11277 -Labels of hidden fields in admin were not hidden."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_all_fields_visible(self):
response = self.client.get(reverse("admin:admin_views_emptymodelvisible_add"))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, "first")
self.assert_field_visible(response, "second")
def test_all_fields_hidden(self):
response = self.client.get(reverse("admin:admin_views_emptymodelhidden_add"))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, "first")
self.assert_field_hidden(response, "second")
def test_mixin(self):
response = self.client.get(reverse("admin:admin_views_emptymodelmixin_add"))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, "first")
self.assert_field_visible(response, "second")
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="fieldBox field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(
response, '<div class="fieldBox field-%s hidden">' % field_name
)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_add_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data.
        Also, assertFormError() and assertFormsetError() are usable for admin
        forms and formsets.
"""
        # The form validation should fail because 'some_required_info' is
        # omitted from the posted data, and the family_name of the parent
        # does not match that of the child.
post_data = {
"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2",
}
response = self.client.post(
reverse("admin:admin_views_parentwithdependentchildren_add"), post_data
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormError(response.context["adminform"], None, [])
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
self.assertFormsetError(
response.context["inline_admin_formset"], None, None, []
)
def test_change_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
pwdc = ParentWithDependentChildren.objects.create(
some_required_info=6, family_name="Test1"
)
        # The form validation should fail because 'some_required_info' is
        # omitted from the posted data, and the family_name of the parent
        # does not match that of the child.
post_data = {
"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1",
}
response = self.client.post(
reverse(
"admin:admin_views_parentwithdependentchildren_change", args=(pwdc.id,)
),
post_data,
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
def test_check(self):
"The view_on_site value is either a boolean or a callable"
try:
admin = CityAdmin(City, AdminSite())
CityAdmin.view_on_site = True
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = False
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = []
self.assertEqual(
admin.check(),
[
Error(
"The value of 'view_on_site' must be a callable or a boolean "
"value.",
obj=CityAdmin,
id="admin.E025",
),
],
)
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, 1))
)
def test_true(self):
"The default behavior is followed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
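    # For the callable case, the Worker admin presumably defines something
    # like (hypothetical sketch):
    #
    #     def view_on_site(self, obj):
    #         return "/worker/%s/%s/" % (obj.surname, obj.name)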
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_worker_change", args=(self.w1.pk,))
)
self.assertContains(
response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name)
)
def test_missing_get_absolute_url(self):
"None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
def test_custom_admin_site(self):
model_admin = ModelAdmin(City, customadmin.site)
content_type_pk = ContentType.objects.get_for_model(City).pk
redirect_url = model_admin.get_view_on_site_url(self.c1)
self.assertEqual(
redirect_url,
reverse(
f"{customadmin.site.name}:view_on_site",
kwargs={
"content_type_id": content_type_pk,
"object_id": self.c1.pk,
},
),
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_state_change", args=(self.s1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_true(self):
"The 'View on site' button is displayed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.r1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
self.assertContains(
response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
    The GetFormsetsArgumentCheckingAdmin ModelAdmin raises an exception
    if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_explicitly_provided_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSiteFinalCatchAllPatternTests(TestCase):
"""
    Verifies the behavior of the admin catch-all view.
    * Anonymous/non-staff users are redirected to login for all URLs, whether
    otherwise valid or not.
    * APPEND_SLASH is applied for staff users if needed.
    * Otherwise Http404.
    * The catch-all view can be disabled via AdminSite.final_catch_all_view.
"""
def test_unknown_url_redirects_login_if_not_authenticated(self):
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), unknown_url)
)
def test_unknown_url_404_if_authenticated(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_known_url_missing_slash_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")[:-1]
response = self.client.get(known_url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_non_admin_url_shares_url_prefix(self):
url = reverse("non_admin")[:-1]
response = self.client.get(url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
def test_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
    def test_unknown_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")[:-1]
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1], SCRIPT_NAME="/prefix/")
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True, FORCE_SCRIPT_NAME="/prefix/")
def test_missing_slash_append_slash_true_force_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_non_staff_user(self):
user = User.objects.create_user(
username="user",
password="secret",
email="[email protected]",
is_staff=False,
)
self.client.force_login(user)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/test_admin/admin/login/?next=/test_admin/admin/admin_views/article",
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_single_model_no_append_slash(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin9:admin_views_actor_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
    # Same tests as above, but with final_catch_all_view=False.
def test_unknown_url_404_if_not_authenticated_without_final_catch_all_view(self):
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_unknown_url_404_if_authenticated_without_final_catch_all_view(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_auth_without_final_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin10:login"), known_url)
)
def test_known_url_missing_slash_redirects_with_slash_if_not_auth_no_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, fetch_redirect_response=False
)
def test_non_admin_url_shares_url_prefix_without_final_catch_all_view(self):
url = reverse("non_admin10")
response = self.client.get(url[:-1])
self.assertRedirects(response, url, status_code=301)
def test_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin10:login"), url))
def test_unknown_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")[:-1]
response = self.client.get(url)
# Matches test_admin/admin10/admin_views/article/<path:object_id>/
self.assertRedirects(
response, url + "/", status_code=301, fetch_redirect_response=False
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url_without_final_catch_all_view(
self,
):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Outside admin.
def test_non_admin_url_404_if_not_authenticated(self):
unknown_url = "/unknown/"
response = self.client.get(unknown_url)
# Does not redirect to the admin login.
self.assertEqual(response.status_code, 404)
import decimal
import enum
import json
import unittest
import uuid
from django import forms
from django.contrib.admin.utils import display_for_field
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.db.models.expressions import Exists, OuterRef, RawSQL, Value
from django.db.models.functions import Cast, JSONObject, Upper
from django.test import TransactionTestCase, modify_settings, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayEnumModel,
ArrayFieldSubclass,
CharArrayModel,
DateTimeArrayModel,
IntegerArrayModel,
NestedIntegerArrayModel,
NullableIntegerArrayModel,
OtherTypesArrayModel,
PostgreSQLModel,
Tag,
)
try:
from psycopg2.extras import NumericRange
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.expressions import ArraySubquery
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.fields.array import IndexTransform, SliceTransform
from django.contrib.postgres.forms import (
SimpleArrayField,
SplitArrayField,
SplitArrayWidget,
)
except ImportError:
pass
@isolate_apps("postgres_tests")
class BasicTests(PostgreSQLSimpleTestCase):
def test_get_field_display(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
["Media", [(["vinyl", "cd"], "Audio")]],
(("mp3", "mp4"), "Digital"),
],
)
tests = (
(["vinyl", "cd"], "Audio"),
(("mp3", "mp4"), "Digital"),
(("a", "b"), "('a', 'b')"),
(["c", "d"], "['c', 'd']"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
def test_get_field_display_nested_array(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
ArrayField(models.CharField(max_length=16)),
choices=[
[
"Media",
[([["vinyl", "cd"], ("x",)], "Audio")],
],
((["mp3"], ("mp4",)), "Digital"),
],
)
tests = (
([["vinyl", "cd"], ("x",)], "Audio"),
((["mp3"], ("mp4",)), "Digital"),
((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"),
([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=["hello", "goodbye"])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=["1"])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
json=[{"a": 1}, {"b": 2}],
int_ranges=[NumericRange(10, 20), NumericRange(30, 40)],
bigint_ranges=[
NumericRange(7000000000, 10000000000),
NumericRange(50000000000, 70000000000),
],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
self.assertEqual(instance.json, loaded.json)
self.assertEqual(instance.int_ranges, loaded.int_ranges)
self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
self.assertEqual(instance.json, [])
self.assertIsNone(instance.int_ranges)
self.assertIsNone(instance.bigint_ranges)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field("field")
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
def test_nested_nullable_base_field(self):
instance = NullableIntegerArrayModel.objects.create(
field_nested=[[None, None], [None, None]],
)
self.assertEqual(instance.field_nested, [[None, None], [None, None]])
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = NullableIntegerArrayModel.objects.bulk_create(
[
NullableIntegerArrayModel(order=1, field=[1]),
NullableIntegerArrayModel(order=2, field=[2]),
NullableIntegerArrayModel(order=3, field=[2, 3]),
NullableIntegerArrayModel(order=4, field=[20, 30, 40]),
NullableIntegerArrayModel(order=5, field=None),
]
)
def test_empty_list(self):
NullableIntegerArrayModel.objects.create(field=[])
obj = (
NullableIntegerArrayModel.objects.annotate(
empty_array=models.Value(
[], output_field=ArrayField(models.IntegerField())
),
)
.filter(field=models.F("empty_array"))
.get()
)
self.assertEqual(obj.field, [])
self.assertEqual(obj.empty_array, [])
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1]
)
def test_exact_with_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]),
self.objs[:1],
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=["text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=["text"]), [instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2],
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.values_list("field", flat=True)
),
self.objs[2:3],
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F("id")]]),
self.objs[:2],
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F("field")]),
self.objs[:4],
)
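    # The containment lookups map onto PostgreSQL array operators:
    # contains -> @>, contained_by -> <@, overlap -> &&.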
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2],
)
def test_contained_by_including_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contained_by=[models.F("order"), 2]
),
self.objs[:3],
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3],
)
def test_contains_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
inner_qs = IntegerArrayModel.objects.values_list("field", flat=True)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=inner_qs[:1]),
self.objs[2:3],
)
inner_qs = IntegerArrayModel.objects.filter(field__contains=OuterRef("field"))
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(Exists(inner_qs)),
self.objs[1:3],
)
def test_contains_including_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contains=[2, Value(6) / Value(2)],
),
self.objs[2:3],
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=["FoO"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains="foo"), [instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=["text"]), []
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=["text"]), []
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=["text"]), []
)
def test_overlap_charfield_including_expression(self):
obj_1 = CharArrayModel.objects.create(field=["TEXT", "lower text"])
obj_2 = CharArrayModel.objects.create(field=["lower text", "TEXT"])
CharArrayModel.objects.create(field=["lower text", "text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(
field__overlap=[
Upper(Value("text")),
"other",
]
),
[obj_1, obj_2],
)
def test_lookups_autofield_array(self):
qs = (
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(
arrayagg=ArrayAgg("id"),
)
.order_by("field__0")
)
tests = (
("contained_by", [self.objs[1].pk, self.objs[2].pk, 0], [2]),
("contains", [self.objs[2].pk], [2]),
("exact", [self.objs[3].pk], [20]),
("overlap", [self.objs[1].pk, self.objs[3].pk], [2, 20]),
)
for lookup, value, expected in tests:
with self.subTest(lookup=lookup):
self.assertSequenceEqual(
qs.filter(
**{"arrayagg__" + lookup: value},
).values_list("field__0", flat=True),
expected,
)
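    # Index transforms (field__0) use 0-based indexing, unlike PostgreSQL's
    # native 1-based array subscripts.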
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance]
)
def test_index_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["1;2"])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0=Cast(
IndexTransform(1, models.IntegerField, expr),
output_field=models.IntegerField(),
),
),
self.objs[:1],
)
def test_index_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(second=models.F("field__1"))
self.assertCountEqual(
qs.values_list("second", flat=True),
[None, None, None, 3, 30],
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3],
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0), [obj]
)
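    # Slice transforms take the form field__<start>_<end> with 0-based,
    # end-exclusive bounds (like Python slicing), so field__0_1 selects the
    # first item as a one-element array.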
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3]
)
def test_order_by_slice(self):
more_objs = (
NullableIntegerArrayModel.objects.create(field=[1, 637]),
NullableIntegerArrayModel.objects.create(field=[2, 1]),
NullableIntegerArrayModel.objects.create(field=[3, -98123]),
NullableIntegerArrayModel.objects.create(field=[4, 2]),
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.order_by("field__1"),
[
more_objs[2],
more_objs[1],
more_objs[3],
self.objs[2],
self.objs[3],
more_objs[0],
self.objs[4],
self.objs[1],
self.objs[0],
],
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance]
)
def test_slice_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["9;2;3"])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0_2=SliceTransform(2, 3, expr)
),
self.objs[2:3],
)
def test_slice_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(
first_two=models.F("field__0_2"),
)
self.assertCountEqual(
qs.values_list("first_two", flat=True),
[None, [1], [2], [2, 3], [20, 30]],
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]],
)
def test_enum_lookup(self):
class TestEnum(enum.Enum):
VALUE_1 = "value_1"
instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1])
self.assertSequenceEqual(
ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]),
[instance],
)
def test_unsupported_lookup(self):
msg = (
"Unsupported lookup '0_bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = (
"Unsupported lookup '0bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
def test_grouping_by_annotations_with_array_field_param(self):
value = models.Value([1], output_field=ArrayField(models.IntegerField()))
self.assertEqual(
NullableIntegerArrayModel.objects.annotate(
array_length=models.Func(
value,
1,
function="ARRAY_LENGTH",
output_field=models.IntegerField(),
),
)
.values("array_length")
.annotate(
count=models.Count("pk"),
)
.get()["array_length"],
1,
)
def test_filter_by_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.filter(
field__len=models.OuterRef("field__len"),
).values("field")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.alias(
same_sized_fields=ArraySubquery(inner_qs),
).filter(same_sized_fields__len__gt=1),
self.objs[0:2],
)
def test_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
)
.get(order=1)
.sibling_ids,
[2, 3, 4, 5],
)
def test_group_by_with_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
sibling_count=models.Max("sibling_ids__len"),
).values_list("sibling_count", flat=True),
[len(self.objs) - 1] * len(self.objs),
)
def test_annotated_ordered_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.order_by("-order").values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
ids=ArraySubquery(inner_qs),
)
.first()
.ids,
[5, 4, 3, 2, 1],
)
def test_annotated_array_subquery_with_json_objects(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values(json=JSONObject(order="order", field="field"))
siblings_json = (
NullableIntegerArrayModel.objects.annotate(
siblings_json=ArraySubquery(inner_qs),
)
.values_list("siblings_json", flat=True)
.get(order=1)
)
self.assertSequenceEqual(
siblings_json,
[
{"field": [2], "order": 2},
{"field": [2, 3], "order": 3},
{"field": [20, 30, 40], "order": 4},
{"field": None, "order": 5},
],
)
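# Editorial sketch (not an upstream test; helper name is illustrative):
# ArraySubquery wraps its queryset in PostgreSQL's ARRAY(...) constructor,
# collecting the subquery's rows into one array value per outer row, which is
# what the annotations above rely on.
def _array_subquery_sql_sketch():
    inner = NullableIntegerArrayModel.objects.values("order")
    qs = NullableIntegerArrayModel.objects.annotate(orders=ArraySubquery(inner))
    # The compiled SQL contains: ARRAY(SELECT ... "order" FROM ...)
    return str(qs.query)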
class TestDateTimeExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
now = timezone.now()
cls.datetimes = [now]
cls.dates = [now.date()]
cls.times = [now.time()]
cls.objs = [
DateTimeArrayModel.objects.create(
datetimes=cls.datetimes, dates=cls.dates, times=cls.times
),
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates), self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times), self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.ips = ["192.168.0.1", "::1"]
cls.uuids = [uuid.uuid4()]
cls.decimals = [decimal.Decimal(1.25), 1.75]
cls.tags = [Tag(1), Tag(2), Tag(3)]
cls.objs = [
OtherTypesArrayModel.objects.create(
ips=cls.ips,
uuids=cls.uuids,
decimals=cls.decimals,
tags=cls.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs
)
@isolate_apps("postgres_tests")
class TestChecks(PostgreSQLSimpleTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.ManyToManyField("postgres_tests.IntegerArrayModel")
)
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, "postgres.E002")
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=[])
model = MyModel()
self.assertEqual(
model.check(),
[
checks.Warning(
msg=(
"ArrayField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint="Use a callable instead, e.g., use `list` instead of `[]`.",
obj=MyModel._meta.get_field("field"),
id="fields.E010",
)
],
)
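    # Editorial sketch (not an upstream test): a callable default is invoked
    # anew on each get_default() call, so instances never share one list.
    def _callable_default_sketch(self):
        field = ArrayField(models.IntegerField(), default=list)
        first, second = field.get_default(), field.get_default()
        assert first == [] and first is not second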
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=list)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=None)
model = MyModel()
self.assertEqual(model.check(), [])
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_choices_tuple_list(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
[
"Media",
[(["vinyl", "cd"], "Audio"), (("vhs", "dvd"), "Video")],
],
(["mp3", "mp4"], "Digital"),
],
)
self.assertEqual(MyModel._meta.get_field("field").check(), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ["postgres_tests"]
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.contrib.postgres.fields.ArrayField")
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass")
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
}
)
def test_adding_field_with_default(self):
# See #22962
table_name = "postgres_tests_integerarraydefaultmodel"
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
}
)
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = "postgres_tests_chartextarrayindexmodel"
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v["columns"]
for k, v in list(
connection.introspection.get_constraints(cursor, table_name).items()
)
if k.endswith("_like")
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [["char2"]])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table_name
).values()
if c["index"] and len(c["columns"]) == 1
]
self.assertIn("char", indexes)
self.assertIn("char2", indexes)
self.assertIn("text", indexes)
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, '
'"model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize("json", [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize("json", self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
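# Editorial sketch (not an upstream test): the round trip above works because
# ArrayField.value_to_string() JSON-encodes the list (items serialized by the
# base field, None kept as null) and to_python() decodes it back again.
def _serialization_roundtrip_sketch():
    field = IntegerArrayModel._meta.get_field("field")
    text = field.value_to_string(IntegerArrayModel(field=[1, 2, None]))
    assert text == '["1", "2", null]'
    assert field.to_python(text) == [1, 2, None]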
class TestValidation(PostgreSQLSimpleTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, "item_invalid")
self.assertEqual(
cm.exception.message % cm.exception.params,
"Item 2 in the array did not validate: This field cannot be null.",
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(
cm.exception.messages[0],
"List contains 4 items, it should contain no more than 3.",
)
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, "nested_array_mismatch")
self.assertEqual(
cm.exception.messages[0], "Nested arrays must have the same length."
)
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc"], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
def test_with_validators(self):
field = ArrayField(
models.IntegerField(validators=[validators.MinValueValidator(1)])
)
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value is greater than "
"or equal to 1.",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params, {"nth": 1, "value": 0, "limit_value": 1, "show_value": 0}
)
class TestSimpleFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean("a,b,c")
self.assertEqual(value, ["a", "b", "c"])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,9")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a whole number.",
)
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,")
self.assertEqual(
cm.exception.messages[0],
"Item 3 in the array did not validate: This field is required.",
)
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("abc,c,defg")
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(first_error.code, "item_invalid")
self.assertEqual(
first_error.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
second_error = errors[1]
self.assertEqual(
second_error.message,
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
)
self.assertEqual(second_error.code, "item_invalid")
self.assertEqual(
second_error.params,
{"nth": 3, "value": "defg", "limit_value": 2, "show_value": 4},
)
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField("[a-e]{2}"))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,bc,de")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a valid value.",
)
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter="|")
value = field.clean("a|b|c")
self.assertEqual(value, ["a", "b", "c"])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter="|")
value = field.clean("a,b|c,d")
self.assertEqual(value, [["a", "b"], ["c", "d"]])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(["a", "b", "c"])
self.assertEqual(value, "a,b,c")
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no more than 2.",
)
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no fewer than 4.",
)
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("")
self.assertEqual(cm.exception.messages[0], "This field is required.")
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_model_field_choices(self):
model_field = ArrayField(models.IntegerField(choices=((1, "A"), (2, "B"))))
form_field = model_field.formfield()
self.assertEqual(form_field.clean("1,2"), [1, 2])
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ["a", "b", "c"]
self.assertEqual(field.clean(vals), vals)
def test_has_changed(self):
field = SimpleArrayField(forms.IntegerField())
self.assertIs(field.has_changed([1, 2], [1, 2]), False)
self.assertIs(field.has_changed([1, 2], "1,2"), False)
self.assertIs(field.has_changed([1, 2], "1,2,3"), True)
self.assertIs(field.has_changed([1, 2], "a,b"), True)
def test_has_changed_empty(self):
field = SimpleArrayField(forms.CharField())
self.assertIs(field.has_changed(None, None), False)
self.assertIs(field.has_changed(None, ""), False)
self.assertIs(field.has_changed(None, []), False)
self.assertIs(field.has_changed([], None), False)
self.assertIs(field.has_changed([], ""), False)
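# Editorial usage sketch (not an upstream test): SimpleArrayField round-trips
# a single delimited text input, whereas SplitArrayField (tested below)
# renders a separate subwidget per item.
def _simple_array_field_usage_sketch():
    field = SimpleArrayField(forms.IntegerField(), delimiter=";")
    assert field.clean("1;2;3") == [1, 2, 3]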
class TestSplitFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": "c"}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {"array_0": "", "array_1": "", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"array": ["This field is required."]})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False), size=5, remove_trailing_nulls=True
)
data = {
"array_0": "a",
"array_1": "",
"array_2": "b",
"array_3": "",
"array_4": "",
}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {"array_0": "", "array_1": ""}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"array": [
"Item 3 in the array did not validate: This field is required."
]
},
)
def test_invalid_integer(self):
msg = (
"Item 2 in the array did not validate: Ensure this value is less than or "
"equal to 100."
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(
str(SplitForm()),
"""
<div>
<label for="id_array_0">Array:</label>
<input id="id_array_0" name="array_0" type="text" required>
<input id="id_array_1" name="array_1" type="text" required>
<input id="id_array_2" name="array_2" type="text" required>
</div>
""",
)
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc", "c", "defg"])
self.assertEqual(
cm.exception.messages,
[
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
],
)
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
form = Form({"field_0": "1", "field_1": "2"})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
def test_splitarrayfield_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, True),
({"field": None}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "1", "field_1": "0"}, True),
({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False),
({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
def test_splitarrayfield_remove_trailing_nulls_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(
forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True
)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, False),
({"field": None}, {"field_0": "", "field_1": ""}, False),
({"field": []}, {"field_0": "", "field_1": ""}, False),
({"field": [1]}, {"field_0": "1", "field_1": ""}, False),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
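# Editorial sketch (not an upstream test): SplitArrayWidget reassembles its
# value from the indexed subwidget names name_0 ... name_{size - 1}.
def _split_array_widget_value_sketch():
    widget = SplitArrayWidget(forms.TextInput(), size=2)
    data = {"array_0": "a", "array_1": "b"}
    assert widget.value_from_datadict(data, {}, "array") == ["a", "b"]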
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context(
"name", ["val1", "val2"]
),
{
"widget": {
"name": "name",
"is_hidden": False,
"required": False,
"value": "['val1', 'val2']",
"attrs": {},
"template_name": "postgres/widgets/split_array.html",
"subwidgets": [
{
"name": "name_0",
"is_hidden": False,
"required": False,
"value": "val1",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
{
"name": "name_1",
"is_hidden": False,
"required": False,
"value": "val2",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
],
}
},
)
def test_checkbox_get_context_attrs(self):
context = SplitArrayWidget(
forms.CheckboxInput(),
size=2,
).get_context("name", [True, False])
self.assertEqual(context["widget"]["value"], "[True, False]")
self.assertEqual(
[subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]],
[{"checked": True}, {}],
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
None,
"""
<input name="array_0" type="text">
<input name="array_1" type="text">
""",
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
["val1", "val2"],
attrs={"id": "foo"},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1">
<input id="foo_1" name="array_1" type="text" value="val2">
"""
),
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data(
{"field_0": "value", "field_1": "value"}, {}, "field"
),
False,
)
class TestAdminUtils(PostgreSQLTestCase):
empty_value = "-empty-"
def test_array_display_for_field(self):
array_field = ArrayField(models.IntegerField())
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "1, 2")
def test_array_with_choices_display_for_field(self):
array_field = ArrayField(
models.IntegerField(),
choices=[
([1, 2, 3], "1st choice"),
([1, 2], "2nd choice"),
],
)
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "2nd choice")
display_value = display_for_field(
[99, 99],
array_field,
self.empty_value,
)
self.assertEqual(display_value, self.empty_value)
6974a14fd1bf1a0dff0b27dd4de6e764bb8f14b1ea7c6ab9a83690751129eceb | import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.core.exceptions import FieldError
from django.core.management.color import no_style
from django.db import (
DatabaseError,
DataError,
IntegrityError,
OperationalError,
connection,
)
from django.db.models import (
CASCADE,
PROTECT,
AutoField,
BigAutoField,
BigIntegerField,
BinaryField,
BooleanField,
CharField,
CheckConstraint,
DateField,
DateTimeField,
DecimalField,
DurationField,
F,
FloatField,
ForeignKey,
ForeignObject,
Index,
IntegerField,
JSONField,
ManyToManyField,
Model,
OneToOneField,
OrderBy,
PositiveIntegerField,
Q,
SlugField,
SmallAutoField,
SmallIntegerField,
TextField,
TimeField,
UniqueConstraint,
UUIDField,
Value,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper
from django.db.models.indexes import IndexExpression
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase,
ignore_warnings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.utils.deprecation import RemovedInDjango51Warning
from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField
from .models import (
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
AuthorWithIndexedName,
AuthorWithUniqueName,
AuthorWithUniqueNameAndBirthday,
Book,
BookForeignObj,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithoutAuthor,
BookWithSlug,
IntegerPK,
Node,
Note,
NoteRename,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
    Be aware that these tests are more prone than most to false results,
    as the code that checks whether a test has worked is sometimes almost as
    complex as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
        # local_models should contain test-dependent model classes that will
        # be automatically removed from the app cache on test tear down.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
if cast_function and type(database_default) != type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
def get_column_collation(self, table, column):
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
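    # Editorial sketch (not an upstream test): create_model() queues deferred
    # SQL (e.g. index creation) that runs when the editor context exits, and
    # delete_model() discards queued statements referencing the dropped table;
    # that is why the test above ends with an empty deferred_sql.
    def _deferred_sql_sketch(self):
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            pending = [str(statement) for statement in editor.deferred_sql]
        with connection.schema_editor() as editor:
            editor.delete_model(Tag)
        return pending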
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key, update data, and an index in the same
# transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
@isolate_apps("schema")
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = "schema"
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name("id")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(
new_field.remote_field.through, "tag_id", "schema_tag"
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(
connection
) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
"column": editor.quote_name(new_field.name),
}
self.assertFalse(
any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
)
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
)
columns = self.column_classes(Author)
self.assertEqual(
columns["age"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
        Adding a field and removing it removes all deferred SQL referring to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["surname"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
columns["surname"][1][6],
connection.features.interprets_empty_strings_as_nulls,
)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# BooleanField are stored as TINYINT(1) on MySQL.
field_type = columns["awesome"][0]
self.assertEqual(
field_type, connection.features.introspected_field_types["BooleanField"]
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default it needs to transform (to an integer
        # count in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns["thing"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b"123")
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns["bits"][0], "TextField")
@isolate_apps("schema")
def test_add_auto_field(self):
class AddAutoFieldModel(Model):
name = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(AddAutoFieldModel)
self.isolated_local_models = [AddAutoFieldModel]
old_field = AddAutoFieldModel._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
new_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.alter_field(AddAutoFieldModel, old_field, new_field)
new_auto_field = AutoField(primary_key=True)
new_auto_field.set_attributes_from_name("id")
        new_auto_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.add_field(AddAutoFieldModel, new_auto_field)
# Crashes on PostgreSQL when the GENERATED BY suffix is missing.
AddAutoFieldModel.objects.create(name="test")
def test_remove_field(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with CaptureQueriesContext(connection) as ctx:
editor.remove_field(Author, Author._meta.get_field("name"))
columns = self.column_classes(Author)
self.assertNotIn("name", columns)
if getattr(connection.features, "can_alter_table_drop_column", True):
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertTrue(columns["name"][1][6])
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Now that ID is an IntegerField, the database raises an error if it
# isn't provided.
if not connection.features.supports_unspecified_pk:
with self.assertRaises(DatabaseError):
Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_auto_field_quoted_db_column(self):
class Foo(Model):
id = AutoField(primary_key=True, db_column='"quoted_id"')
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.db_column = '"quoted_id"'
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field("uuid")
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name("uuid")
new_field.model = Author
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field("id"))
editor.alter_field(Author, old_field, new_field, strict=True)
# Redundant unique constraint is not added.
count = self.get_constraints_count(
Author._meta.db_table,
Author._meta.get_field("uuid").column,
None,
)
self.assertLessEqual(count["uniques"], 1)
@isolate_apps("schema")
def test_alter_primary_key_quoted_db_table(self):
class Foo(Model):
class Meta:
app_label = "schema"
db_table = '"foo"'
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_not_null_with_default_value(self):
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("address")
new_field = TextField(blank=True, default="", null=False)
new_field.set_attributes_from_name("address")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("parent")
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name("parent")
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_null_with_default_value_deferred_constraints(self):
class Publisher(Model):
class Meta:
app_label = "schema"
class Article(Model):
publisher = ForeignKey(Publisher, CASCADE)
title = CharField(max_length=50, null=True)
description = CharField(max_length=100, null=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Publisher)
editor.create_model(Article)
self.isolated_local_models = [Article, Publisher]
publisher = Publisher.objects.create()
Article.objects.create(publisher=publisher)
old_title = Article._meta.get_field("title")
new_title = CharField(max_length=50, null=False, default="")
new_title.set_attributes_from_name("title")
old_description = Article._meta.get_field("description")
new_description = CharField(max_length=100, null=False, default="")
new_description.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.alter_field(Article, old_title, new_title, strict=True)
editor.alter_field(Article, old_description, new_description, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05")
old_field = Note._meta.get_field("info")
new_field = DateField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
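# column_classes() pairs the introspected field type with the raw
# cursor.description row; per PEP 249 that row is
# (name, type_code, display_size, internal_size, precision, scale,
# null_ok), so [1][6] is the null_ok flag checked below.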
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05 3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = TimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_not_null_to_null(self):
"""
Nullability for textual fields is preserved on databases that
interpret empty strings as NULLs.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
columns = self.column_classes(Author)
# Field is nullable.
self.assertTrue(columns["uuid"][1][6])
# Change to NOT NULL.
old_field = Author._meta.get_field("uuid")
new_field = SlugField(null=False, blank=True)
new_field.set_attributes_from_name("uuid")
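# These backends store empty strings as NULL, so enforcing NOT NULL
# would break blank values; the editor is expected to leave the column
# nullable despite null=False.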
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
# Nullability is preserved.
self.assertTrue(columns["uuid"][1][6])
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="aaa")
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="bbb")
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns["height"][1][6])
# Create some test data
Author.objects.create(name="Not null author", height=12)
Author.objects.create(name="Null author")
# Verify null value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertIsNone(Author.objects.get(name="Null author").height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
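# The schema editor is expected to backfill existing NULLs with the new
# default before enforcing NOT NULL, roughly:
#   ALTER ... SET DEFAULT 42;
#   UPDATE ... SET height = 42 WHERE height IS NULL;
#   ALTER ... SET NOT NULL; ALTER ... DROP DEFAULT;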
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertFalse(columns["height"][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertEqual(Author.objects.get(name="Null author").height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field("name")
new_field = copy(old_field)
new_field.null = True
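# On interprets_empty_strings_as_nulls backends (e.g. Oracle), the
# column is already effectively nullable, so no ALTER should be issued.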
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_char_field_decrease_length(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
Author.objects.create(name="x" * 255)
# Change max_length of CharField.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("name")
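# The 255-character row created above can't be cast down to
# varchar(254), so PostgreSQL aborts the ALTER with a DataError.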
with connection.schema_editor() as editor:
msg = "value too long for type character varying(254)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_with_custom_db_type(self):
from django.contrib.postgres.fields import ArrayField
class Foo(Model):
field = ArrayField(CharField(max_length=255))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("field")
new_field = ArrayField(CharField(max_length=16))
new_field.set_attributes_from_name("field")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=["x" * 16])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(CharField(max_length=15))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_nested_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(ArrayField(CharField(max_length=16)))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=[["x" * 16]])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(ArrayField(CharField(max_length=15)))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
from django.contrib.postgres.fields import ArrayField
ci_collation = "case_insensitive"
cs_collation = "en-x-icu"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16, db_collation=ci_collation))
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
ci_collation,
)
# Alter collation.
old_field = ArrayModel._meta.get_field("field")
new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
new_field_cs.set_attributes_from_name("field")
new_field_cs.model = ArrayModel
with connection.schema_editor() as editor:
editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
cs_collation,
)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_unique_with_collation_charfield(self):
ci_collation = "case_insensitive"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class CiCharModel(Model):
field = CharField(max_length=16, db_collation=ci_collation, unique=True)
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(CiCharModel)
self.isolated_local_models = [CiCharModel]
self.assertEqual(
self.get_column_collation(CiCharModel._meta.db_table, "field"),
ci_collation,
)
self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field("info")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns["height"][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details["foreign_key"]:
self.fail(
"Found an unexpected FK constraint to %s" % details["columns"]
)
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
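# Swapping IntegerField for ForeignKey renames the column to the FK's
# attname (author_id) and adds the constraint in the same alter.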
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
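# Book is assumed to share BookWithO2O's db_table, so it now describes
# the altered (non-unique) schema and is used for introspection below.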
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique anymore
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
Book.objects.all().delete()
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique now
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for
# OneToOneField.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_autofield_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
# Rename the field.
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("note_ptr")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Alter AutoField to OneToOneField.
new_field_o2o = OneToOneField(Note, CASCADE)
new_field_o2o.set_attributes_from_name("note_ptr")
new_field_o2o.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field_o2o, strict=True)
columns = self.column_classes(Author)
field_type, _ = columns["note_ptr_id"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for
# ForeignKey.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name="Foo")
Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
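# Inserting with an explicit pk doesn't advance a backend sequence, so
# reset it (where one exists) before relying on implicit pk generation.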
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_autofield_pk_to_smallautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = SmallAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
"""
Should be able to alter an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToAutoField(Model):
i = AutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
Should be able to alter an IntegerField(primary_key=True) to
BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToBigAutoField(Model):
i = BigAutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToBigAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
"""
Should be able to alter a SmallIntegerField(primary_key=True) to
SmallAutoField(primary_key=True).
"""
class SmallIntegerPK(Model):
i = SmallIntegerField(primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SmallIntegerPK)
self.isolated_local_models = [SmallIntegerPK]
old_field = SmallIntegerPK._meta.get_field("i")
new_field = SmallAutoField(primary_key=True)
new_field.model = SmallIntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
Should be able to alter an IntegerField(primary_key=True) to
IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field("i")
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# model requires a new PK
old_field = Transitional._meta.get_field("j")
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name("j")
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(
columns["display_name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field="name")
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
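# Renaming a column that an FK points at can't always run atomically
# (e.g. on SQLite), hence the feature-dependent atomic flag below.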
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["detail_info"][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass(
"TagM2MTest", related_name="books", through=LocalTagThrough
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(
columns["book_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertEqual(
columns["tag_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
class LocalAuthorNoteThrough(Model):
book = ForeignKey("schema.Author", CASCADE)
tag = ForeignKey("self", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalNoteWithM2MThrough(Model):
authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalAuthorNoteThrough)
editor.create_model(LocalNoteWithM2MThrough)
# Remove the through parameter.
old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
new_field = ManyToManyField("Author")
new_field.set_attributes_from_name("authors")
msg = (
f"Cannot alter field {old_field} into {new_field} - they are not "
f"compatible types (you cannot alter to or from M2M fields, or add or "
f"remove through= on M2M fields)"
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
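# contribute_to_class() attaches the field and auto-creates the hidden
# through model; its table doesn't exist until add_field() runs below.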
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models["schema"][
new_field.remote_field.through._meta.model_name
]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(
LocalAuthorWithM2MThrough, old_field, new_field, strict=True
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
"tagm2mtest_id",
"schema_tagm2mtest",
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
)
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = "schema"
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = "schema"
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field("title")
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, "title1")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details["columns"] == ["height"] and details["check"]:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
class DurationModel(Model):
duration = DurationField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(DurationModel)
self.isolated_local_models = [DurationModel]
constraint_name = "duration_gte_5_minutes"
constraint = CheckConstraint(
check=Q(duration__gt=datetime.timedelta(minutes=5)),
name=constraint_name,
)
DurationModel._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(DurationModel, constraint)
constraints = self.get_constraints(DurationModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with self.assertRaises(IntegrityError), atomic():
DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the custom check constraint
constraint = CheckConstraint(
check=Q(height__gte=0), name="author_height_gte_0_check"
)
custom_constraint_name = constraint.name
Author._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
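# "Other" constraints are the column-level check Django generates for
# PositiveIntegerField (height >= 0), as opposed to the Meta-level
# constraint added above.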
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field check
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field check
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the check constraint
with connection.schema_editor() as editor:
Author._meta.constraints = []
editor.remove_constraint(Author, constraint)
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
TagUniqueRename._meta.db_table = "unique-table"
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
finally:
with connection.schema_editor() as editor:
editor.delete_model(TagUniqueRename)
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
"""
If AlterField isn't selective about dropping foreign key constraints
when modifying a field with a unique constraint, it incorrectly drops
and recreates the Book.author foreign key even though that constraint
doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name("name")
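# The schema editor logs each executed statement at DEBUG on the
# django.db.backends.schema logger, so the record count equals the
# number of DDL statements run.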
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
class Book(Model):
tags = ManyToManyField(Tag, related_name="books")
class Meta:
app_label = "schema"
self.isolated_local_models = [Book._meta.get_field("tags").remote_field.through]
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name("slug")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueName)
self.local_models = [AuthorWithUniqueName]
# Add the custom unique constraint
constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
custom_constraint_name = constraint.name
AuthorWithUniqueName._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueName, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field uniqueness
old_field = AuthorWithUniqueName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field uniqueness
new_field2 = AuthorWithUniqueName._meta.get_field("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueName._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueName, constraint)
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, UniqueTest._meta.unique_together, []
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, [], UniqueTest._meta.unique_together
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields aren't unique together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
editor.add_field(BookWithoutAuthor, new_field)
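# BookWithoutAuthor is assumed to share Book's db_table, so Book stands
# in for the now-complete table below.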
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueNameAndBirthday)
self.local_models = [AuthorWithUniqueNameAndBirthday]
# Add the custom unique constraint
constraint = UniqueConstraint(
fields=["name", "birthday"], name="author_name_birthday_uniq"
)
custom_constraint_name = constraint.name
AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Remove unique together
unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, unique_together, []
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add unique together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, [], unique_together
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueNameAndBirthday._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(fields=["name"], name="name_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIs(sql.references_table(table), True)
self.assertIs(sql.references_column(table, "name"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
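    # The expression-based UniqueConstraint tests below rely on the backend
    # creating a unique index for the expressions, which is why they're
    # gated on supports_expression_indexes.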
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC"])
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Upper("title"),
Lower("slug"),
name="func_upper_lower_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains database functions.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
sql = str(sql)
self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_unique_constraint_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("height").desc(),
"uuid",
Lower("name").asc(),
name="func_f_lower_field_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
constraints = self.get_constraints(table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "uuid"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
def test_func_unique_constraint_partial(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_cond_weight_uq",
condition=Q(weight__isnull=False),
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("weight"),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
def test_func_unique_constraint_covering(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_covering_uq",
include=["weight", "height"],
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(
constraints[constraint.name]["columns"],
[None, "weight", "height"],
)
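        # The leading None stands in for the Upper("name") expression, which
        # maps to no plain column; "weight" and "height" are the non-key
        # INCLUDE columns.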
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
self.assertIs(sql.references_column(table, "height"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"INCLUDE (%s, %s)"
% (
editor.quote_name("weight"),
editor.quote_name("height"),
),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
constraint = UniqueConstraint(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_uq",
)
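            # With Lower/Abs registered as lookups, the F() expressions
            # resolve "name__lower" and "weight__abs" through the transform
            # machinery when the constraint SQL is built.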
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
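    # index_together is deprecated in favor of Meta.indexes and slated for
    # removal (hence RemovedInDjango51Warning); these tests remain only to
    # cover the legacy path until then.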
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure the fields aren't indexed together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [["author", "title"]], [])
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
class TagIndexed(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
index_together = [["slug", "title"]]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
self.isolated_local_models = [TagIndexed]
# Ensure there is an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c["columns"] == ["slug", "title"]
),
True,
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_remove_index_together_does_not_remove_meta_indexes(self):
class AuthorWithIndexedNameAndBirthday(Model):
name = CharField(max_length=255)
birthday = DateField()
class Meta:
app_label = "schema"
index_together = [["name", "birthday"]]
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedNameAndBirthday)
self.isolated_local_models = [AuthorWithIndexedNameAndBirthday]
# Add the custom index
index = Index(fields=["name", "birthday"], name="author_name_birthday_idx")
custom_index_name = index.name
AuthorWithIndexedNameAndBirthday._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedNameAndBirthday, index)
# Ensure the indexes exist
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Remove index together
index_together = AuthorWithIndexedNameAndBirthday._meta.index_together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, index_together, []
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add index together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, [], index_together
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the index
with connection.schema_editor() as editor:
AuthorWithIndexedNameAndBirthday._meta.indexes = []
editor.remove_index(AuthorWithIndexedNameAndBirthday, index)
@isolate_apps("schema")
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Alter the table
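        # Renaming a table that other tables reference by foreign key can't
        # always run inside a transaction, so atomicity is keyed off the
        # supports_atomic_references_rename feature flag.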
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=["name"], name="author_title_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
self.local_models = [AuthorWithIndexedName]
# Ensure the table has its index
self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=["-name"], name="author_name_idx")
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=("name",),
)
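        # _create_index_name() rebuilds the auto-generated name that
        # db_index=True used, so the test can later assert that exactly that
        # index is dropped while the Meta.indexes one survives.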
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithIndexedName, old_field, new_field, strict=True
)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Indexes defined with column ordering (ASC/DESC).
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
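    # Helper used by the error-message tests below; wrapper classes are the
    # expressions (such as OrderBy and Collate, depending on the backend)
    # that may only appear at the top of an indexed expression.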
def _index_expressions_wrappers(self):
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name").desc(), name="func_lower_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_f(self):
with connection.schema_editor() as editor:
editor.create_model(Tag)
index = Index("slug", F("title").desc(), name="func_f_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Tag, index)
sql = index.create_sql(Tag, editor)
table = Tag._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
# SQL contains columns.
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIs(sql.references_column(table, "title"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Tag, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
index = Index(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains database functions.
self.assertIs(sql.references_column(table, "name"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
index = Index(
F("author").desc(),
Lower("title").asc(),
"pub_date",
name="func_f_lower_field_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Book, index)
sql = index.create_sql(Book, editor)
table = Book._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
self.assertEqual(len(constraints[index.name]["columns"]), 3)
self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "author_id"), True)
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "pub_date"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@isolate_apps("schema")
def test_func_index_f_decimalfield(self):
class Node(Model):
value = DecimalField(max_digits=5, decimal_places=2)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
index = Index(F("value"), name="func_f_decimalfield_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Node, index)
sql = index.create_sql(Node, editor)
table = Node._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "value"), True)
# SQL doesn't contain casting.
self.assertNotIn("CAST", str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Node, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_cast(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Cast("weight", FloatField()), name="func_cast_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
table = Book._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@skipIfDBFeature("collate_as_index_expression")
def test_func_index_collate_f_ordered(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(
Collate(F("name").desc(), collation=collation),
name="func_collate_f_desc_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_calc(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns and expressions.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "weight"), True)
sql = str(sql)
self.assertIs(
sql.index(editor.quote_name("height"))
< sql.index("/")
< sql.index(editor.quote_name("weight"))
< sql.index("+")
< sql.index("5"),
True,
)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index("field__some_key", name="func_json_key_idx")
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform_cast(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index(
Cast(KeyTextTransform("some_key", "field"), IntegerField()),
name="func_json_key_cast_idx",
)
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_index_unsupported(self):
# Index is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("name"), name="random_idx")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_index(Author, index))
self.assertIsNone(editor.remove_index(Author, index))
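            # assertNumQueries(0) is the real check: the editor bails out
            # early for expression indexes it can't create, running no SQL.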
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nonexistent_field(self):
index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Random(), name="func_random_idx")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_index(Author, index)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
"id",
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
def test_alter_primary_key_the_same_name(self):
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=2, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
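            # If the editor had leaked its atomic block on the error,
            # in_atomic_block would still be True here.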
@skipIfDBFeature("can_rollback_ddl")
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(
editor.sql_create_table % {"table": "foo", "definition": ""}
)
@skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name(
"author_foreign_key_with_really_long_field_name_id"
)
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
        # Add a second FK; before the fix this would fail due to the long ref name
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
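        # The generated FK constraint name would blow past most backends'
        # identifier length limits, so the schema editor must truncate and
        # hash it rather than emit invalid SQL.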
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
self.isolated_local_models = [Author]
if connection.vendor == "mysql":
self.assertForeignKeyExists(
Book, "author_id", '"table_author_double_quoted"'
)
else:
self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
        Tries creating a model's table, and then deleting it, when the table
        has an SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail(
"Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e
)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs["db_column"] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_index
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
"include": "",
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
constraint_name = "CamelCaseUniqConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(editor._create_unique_sql(model, [field], constraint_name))
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(
model,
get_field(Author, CASCADE, field_class=ForeignKey),
field,
strict=True,
)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(
item[0],
None if connection.features.interprets_empty_strings_as_nulls else "",
)
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default="surname default")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], "surname default")
# And that the default is no longer set in the database.
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_add_field_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable CharField with a default.
new_field = CharField(max_length=15, blank=True, null=True, default="surname")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "surname"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_add_textfield_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable TextField with a default.
new_field = TextField(blank=True, null=True, default="text")
new_field.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT description FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "description"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field("height")
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_doesnt_perform_queries(self):
"""
        No queries are performed if only a field's default changes and the
        field isn't changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_fk_attributes_noop(self):
"""
No queries are performed when changing field attributes that don't
affect the schema.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_field = Book._meta.get_field("author")
new_field = ForeignKey(
Author,
blank=True,
editable=False,
error_messages={"invalid": "error message"},
help_text="help text",
limit_choices_to={"limit": "choice"},
on_delete=PROTECT,
related_name="related_name",
related_query_name="related_query_name",
validators=[lambda x: x],
verbose_name="verbose name",
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, new_field, old_field, strict=True)
def test_alter_field_choices_noop(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(
choices=(("Jane", "Jane"), ("Joe", "Joe")),
max_length=255,
)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes; one for the LIKE operator.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851",
"schema_author_nom_de_plume_7570a851_like",
],
)
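        # The "_like" entry is PostgreSQL-specific: Django adds a second
        # index using the *_pattern_ops operator class so LIKE/startswith
        # lookups can use an index too.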
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes; one for the LIKE operator.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851_like",
"schema_author_nom_de_plume_key",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field("info")
new_field = TextField(db_index=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, "info"),
["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"), []
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to add db_index=True
old_field = Tag._meta.get_field("slug")
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
self, mocked_datetime, mocked_tz
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(
month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc
)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
        # The auto_now/auto_now_add columns don't exist yet.
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True)
dt_tm_of_birth_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dt_tm_of_birth_auto_now_add,
"dtob_auto_now_add",
now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
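    # Illustrative sketch (not part of the original suite): the
    # effective_default() machinery exercised above can also be probed
    # directly. This assumes BaseDatabaseSchemaEditor's documented behavior
    # of substituting the current date/time for auto_now/auto_now_add fields
    # that have no explicit default.
    def test_effective_default_auto_now_add_sketch(self):
        field = DateField(auto_now_add=True)
        field.set_attributes_from_name("dob_sketch")
        with connection.schema_editor() as editor:
            # A date-like value is produced instead of NULL.
            self.assertIsNotNone(editor.effective_default(field))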
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
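    # Illustrative sketch (not part of the original suite): the same private
    # _create_index_name() helper also keeps generated names within the
    # backend's identifier limit; the `or len(...)` fallback covers backends
    # whose max_name_length() is None.
    def test_create_index_name_length_sketch(self):
        with connection.schema_editor() as editor:
            index_name = editor._create_index_name("t" * 250, ["some_column"])
        max_length = connection.ops.max_name_length() or len(index_name)
        self.assertLessEqual(len(index_name), max_length)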
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
|
2e190db8e0227ebc820d6bde6cd4d1b9b600bd300398d526f320206934effab6 | import operator
import uuid
from unittest import mock
from django import forms
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import (
DataError,
IntegrityError,
NotSupportedError,
OperationalError,
connection,
models,
)
from django.db.models import (
Count,
ExpressionWrapper,
F,
IntegerField,
OuterRef,
Q,
Subquery,
Transform,
Value,
)
from django.db.models.expressions import RawSQL
from django.db.models.fields.json import (
KeyTextTransform,
KeyTransform,
KeyTransformFactory,
KeyTransformTextLookupMixin,
)
from django.db.models.functions import Cast
from django.test import SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel
@skipUnlessDBFeature("supports_json_field")
class JSONFieldTests(TestCase):
def test_invalid_value(self):
msg = "is not JSON serializable"
with self.assertRaisesMessage(TypeError, msg):
NullableJSONModel.objects.create(
value={
"uuid": uuid.UUID("d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475"),
}
)
def test_custom_encoder_decoder(self):
value = {"uuid": uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")}
obj = NullableJSONModel(value_custom=value)
obj.clean_fields()
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value_custom, value)
def test_db_check_constraints(self):
value = "{@!invalid json value 123 $!@#"
with mock.patch.object(DjangoJSONEncoder, "encode", return_value=value):
with self.assertRaises((IntegrityError, DataError, OperationalError)):
NullableJSONModel.objects.create(value_custom=value)
class TestMethods(SimpleTestCase):
def test_deconstruct(self):
field = models.JSONField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.JSONField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_deconstruct_custom_encoder_decoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs["encoder"], DjangoJSONEncoder)
self.assertEqual(kwargs["decoder"], CustomJSONDecoder)
def test_get_transforms(self):
@models.JSONField.register_lookup
class MyTransform(Transform):
lookup_name = "my_transform"
field = models.JSONField()
transform = field.get_transform("my_transform")
self.assertIs(transform, MyTransform)
models.JSONField._unregister_lookup(MyTransform)
transform = field.get_transform("my_transform")
self.assertIsInstance(transform, KeyTransformFactory)
def test_key_transform_text_lookup_mixin_non_key_transform(self):
transform = Transform("test")
msg = (
"Transform should be an instance of KeyTransform in order to use "
"this lookup."
)
with self.assertRaisesMessage(TypeError, msg):
KeyTransformTextLookupMixin(transform)
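    # Illustrative sketch (not part of the original suite): the
    # KeyTransformFactory returned above builds KeyTransform instances when
    # called with a source expression.
    def test_key_transform_factory_call_sketch(self):
        field = models.JSONField()
        factory = field.get_transform("some_unregistered_key")
        self.assertIsInstance(factory, KeyTransformFactory)
        self.assertIsInstance(factory("value"), KeyTransform)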
class TestValidation(SimpleTestCase):
def test_invalid_encoder(self):
msg = "The encoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(encoder=DjangoJSONEncoder())
def test_invalid_decoder(self):
msg = "The decoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(decoder=CustomJSONDecoder())
def test_validation_error(self):
field = models.JSONField()
msg = "Value must be valid JSON."
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
with self.assertRaisesMessage(ValidationError, msg):
field.clean({"uuid": value}, None)
def test_custom_encoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder)
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
field.clean({"uuid": value}, None)
class TestFormField(SimpleTestCase):
def test_formfield(self):
model_field = models.JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_formfield_custom_encoder_decoder(self):
model_field = models.JSONField(
encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder
)
form_field = model_field.formfield()
self.assertIs(form_field.encoder, DjangoJSONEncoder)
self.assertIs(form_field.decoder, CustomJSONDecoder)
class TestSerialization(SimpleTestCase):
test_data = (
'[{"fields": {"value": %s}, "model": "model_fields.jsonmodel", "pk": null}]'
)
test_values = (
# (Python value, serialized value),
({"a": "b", "c": None}, '{"a": "b", "c": null}'),
("abc", '"abc"'),
('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
)
def test_dumping(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = JSONModel(value=value)
data = serializers.serialize("json", [instance])
self.assertJSONEqual(data, self.test_data % serialized)
def test_loading(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = list(
serializers.deserialize("json", self.test_data % serialized)
)[0].object
self.assertEqual(instance.value, value)
def test_xml_serialization(self):
test_xml_data = (
'<django-objects version="1.0">'
'<object model="model_fields.nullablejsonmodel">'
'<field name="value" type="JSONField">%s'
"</field></object></django-objects>"
)
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = NullableJSONModel(value=value)
data = serializers.serialize("xml", [instance], fields=["value"])
self.assertXMLEqual(data, test_xml_data % serialized)
new_instance = list(serializers.deserialize("xml", data))[0].object
self.assertEqual(new_instance.value, instance.value)
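    # Illustrative sketch (not part of the original suite): Python None goes
    # through the same serialization machinery and is dumped as JSON null.
    def test_dumping_none_sketch(self):
        instance = JSONModel(value=None)
        data = serializers.serialize("json", [instance])
        self.assertJSONEqual(data, self.test_data % "null")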
@skipUnlessDBFeature("supports_json_field")
class TestSaveLoad(TestCase):
def test_null(self):
obj = NullableJSONModel(value=None)
obj.save()
obj.refresh_from_db()
self.assertIsNone(obj.value)
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_json_null_different_from_sql_null(self):
json_null = NullableJSONModel.objects.create(value=Value("null"))
json_null.refresh_from_db()
sql_null = NullableJSONModel.objects.create(value=None)
sql_null.refresh_from_db()
# 'null' is not equal to NULL in the database.
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=Value("null")),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=None),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[sql_null],
)
# 'null' is equal to NULL in Python (None).
self.assertEqual(json_null.value, sql_null.value)
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_primitives(self):
values = [
True,
1,
1.45,
"String",
"",
]
for value in values:
with self.subTest(value=value):
obj = JSONModel(value=value)
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_dict(self):
values = [
{},
{"name": "John", "age": 20, "height": 180.3},
{"a": True, "b": {"b1": False, "b2": None}},
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_list(self):
values = [
[],
["John", 20, 180.3],
[True, [False, None]],
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_realistic_object(self):
value = {
"name": "John",
"age": 20,
"pets": [
{"name": "Kit", "type": "cat", "age": 2},
{"name": "Max", "type": "dog", "age": 1},
],
"courses": [
["A1", "A2", "A3"],
["B1", "B2"],
["C1"],
],
}
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
@skipUnlessDBFeature("supports_json_field")
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.primitives = [True, False, "yes", 7, 9.6]
values = [
None,
[],
{},
{"a": "b", "c": 14},
{
"a": "b",
"c": 14,
"d": ["e", {"f": "g"}],
"h": True,
"i": False,
"j": None,
"k": {"l": "m"},
"n": [None, True, False],
"o": '"quoted"',
"p": 4.2,
"r": {"s": True, "t": False},
},
[1, [2]],
{"k": True, "l": False, "foo": "bax"},
{
"foo": "bar",
"baz": {"a": "b", "c": "d"},
"bar": ["foo", "bar"],
"bax": {"foo": "bar"},
},
]
cls.objs = [NullableJSONModel.objects.create(value=value) for value in values]
if connection.features.supports_primitives_in_json_field:
cls.objs.extend(
[
NullableJSONModel.objects.create(value=value)
for value in cls.primitives
]
)
cls.raw_sql = "%s::jsonb" if connection.vendor == "postgresql" else "%s"
def test_exact(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={}),
[self.objs[2]],
)
def test_exact_complex(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={"a": "b", "c": 14}),
[self.objs[3]],
)
def test_icontains(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__icontains="BaX"),
self.objs[6:8],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[self.objs[0]],
)
def test_ordering_by_transform(self):
mariadb = connection.vendor == "mysql" and connection.mysql_is_mariadb
values = [
{"ord": 93, "name": "bar"},
{"ord": 22.1, "name": "foo"},
{"ord": -1, "name": "baz"},
{"ord": 21.931902, "name": "spam"},
{"ord": -100291029, "name": "eggs"},
]
for field_name in ["value", "value_custom"]:
with self.subTest(field=field_name):
objs = [
NullableJSONModel.objects.create(**{field_name: value})
for value in values
]
query = NullableJSONModel.objects.filter(
**{"%s__name__isnull" % field_name: False},
).order_by("%s__ord" % field_name)
expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
if mariadb or connection.vendor == "oracle":
# MariaDB and Oracle return JSON values as strings.
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
self.assertSequenceEqual(query, expected)
def test_ordering_grouping_by_key_transform(self):
base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
for qs in (
base_qs.order_by("value__d__0"),
base_qs.annotate(
key=KeyTransform("0", KeyTransform("d", "value"))
).order_by("key"),
):
self.assertSequenceEqual(qs, [self.objs[4]])
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
qs = NullableJSONModel.objects.filter(value__isnull=False)
self.assertQuerysetEqual(
qs.filter(value__isnull=False)
.annotate(
key=KeyTextTransform(
"f", KeyTransform("1", KeyTransform("d", "value"))
),
)
.values("key")
.annotate(count=Count("key"))
.order_by("count"),
[(none_val, 0), ("g", 1)],
operator.itemgetter("key", "count"),
)
def test_ordering_grouping_by_count(self):
qs = (
NullableJSONModel.objects.filter(
value__isnull=False,
)
.values("value__d__0")
.annotate(count=Count("value__d__0"))
.order_by("count")
)
self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter("count"))
def test_order_grouping_custom_decoder(self):
NullableJSONModel.objects.create(value_custom={"a": "b"})
qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
self.assertSequenceEqual(
qs.values(
"value_custom__a",
)
.annotate(
count=Count("id"),
)
.order_by("value_custom__a"),
[{"value_custom__a": "b", "count": 1}],
)
def test_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__foo=KeyTransform("x", expr)),
[self.objs[7]],
)
def test_nested_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
value__foo=KeyTransform("y", KeyTransform("x", expr))
),
[self.objs[7]],
)
def test_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("0", "key"),
expr=KeyTransform("0", Cast("key", models.JSONField())),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(value={"d": ["e", "e"]})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__0"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__1")),
[obj],
)
def test_nested_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
expr=KeyTransform(
"f", KeyTransform("1", Cast("key", models.JSONField()))
),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_nested_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(
value={"d": ["e", {"f": "g"}, {"f": "g"}]},
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__1__f"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__2__f")),
[obj],
)
def test_nested_key_transform_on_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
subquery_value=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
key=KeyTransform("d", "subquery_value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
)
.filter(chain="g"),
[self.objs[4]],
)
def test_key_text_transform_char_lookup(self):
qs = NullableJSONModel.objects.annotate(
char_value=KeyTextTransform("foo", "value"),
).filter(char_value__startswith="bar")
self.assertSequenceEqual(qs, [self.objs[7]])
qs = NullableJSONModel.objects.annotate(
char_value=KeyTextTransform(1, KeyTextTransform("bar", "value")),
).filter(char_value__startswith="bar")
self.assertSequenceEqual(qs, [self.objs[7]])
def test_expression_wrapper_key_transform(self):
self.assertCountEqual(
NullableJSONModel.objects.annotate(
expr=ExpressionWrapper(
KeyTransform("c", "value"),
output_field=IntegerField(),
),
).filter(expr__isnull=False),
self.objs[3:5],
)
def test_has_key(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__has_key="a"),
[self.objs[3], self.objs[4]],
)
def test_has_key_null_value(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key="j"),
[self.objs[4]],
)
def test_has_key_deep(self):
tests = [
(Q(value__baz__has_key="a"), self.objs[7]),
(
Q(value__has_key=KeyTransform("a", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__a")), self.objs[7]),
(
Q(value__has_key=KeyTransform("c", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__c")), self.objs[7]),
(Q(value__d__1__has_key="f"), self.objs[4]),
(
Q(
value__has_key=KeyTransform(
"f", KeyTransform("1", KeyTransform("d", "value"))
)
),
self.objs[4],
),
(Q(value__has_key=F("value__d__1__f")), self.objs[4]),
]
for condition, expected in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[expected],
)
def test_has_key_list(self):
obj = NullableJSONModel.objects.create(value=[{"a": 1}, {"b": "x"}])
tests = [
Q(value__1__has_key="b"),
Q(value__has_key=KeyTransform("b", KeyTransform(1, "value"))),
Q(value__has_key=KeyTransform("b", KeyTransform("1", "value"))),
Q(value__has_key=F("value__1__b")),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
def test_has_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_keys=["a", "c", "h"]),
[self.objs[4]],
)
def test_has_any_keys(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__has_any_keys=["c", "l"]),
[self.objs[3], self.objs[4], self.objs[6]],
)
def test_has_key_number(self):
obj = NullableJSONModel.objects.create(
value={
"123": "value",
"nested": {"456": "bar", "lorem": "abc", "999": True},
"array": [{"789": "baz", "777": "def", "ipsum": 200}],
"000": "val",
}
)
tests = [
Q(value__has_key="123"),
Q(value__nested__has_key="456"),
Q(value__array__0__has_key="789"),
Q(value__has_keys=["nested", "123", "array", "000"]),
Q(value__nested__has_keys=["lorem", "999", "456"]),
Q(value__array__0__has_keys=["789", "ipsum", "777"]),
Q(value__has_any_keys=["000", "nonexistent"]),
Q(value__nested__has_any_keys=["999", "nonexistent"]),
Q(value__array__0__has_any_keys=["777", "nonexistent"]),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains(self):
tests = [
({}, self.objs[2:5] + self.objs[6:8]),
({"baz": {"a": "b", "c": "d"}}, [self.objs[7]]),
({"baz": {"a": "b"}}, [self.objs[7]]),
({"baz": {"c": "d"}}, [self.objs[7]]),
({"k": True, "l": False}, [self.objs[6]]),
({"d": ["e", {"f": "g"}]}, [self.objs[4]]),
({"d": ["e"]}, [self.objs[4]]),
({"d": [{"f": "g"}]}, [self.objs[4]]),
([1, [2]], [self.objs[5]]),
([1], [self.objs[5]]),
([[2]], [self.objs[5]]),
({"n": [None, True, False]}, [self.objs[4]]),
({"j": None}, [self.objs[4]]),
]
for value, expected in tests:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertCountEqual(qs, expected)
@skipIfDBFeature("supports_json_field_contains")
def test_contains_unsupported(self):
msg = "contains lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(
value__contains={"baz": {"a": "b", "c": "d"}},
).get()
@skipUnlessDBFeature(
"supports_primitives_in_json_field",
"supports_json_field_contains",
)
def test_contains_primitives(self):
for value in self.primitives:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertIs(qs.exists(), True)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contained_by(self):
qs = NullableJSONModel.objects.filter(
value__contained_by={"a": "b", "c": 14, "h": True}
)
self.assertCountEqual(qs, self.objs[2:4])
@skipIfDBFeature("supports_json_field_contains")
def test_contained_by_unsupported(self):
msg = "contained_by lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(value__contained_by={"a": "b"}).get()
def test_deep_values(self):
qs = NullableJSONModel.objects.values_list("value__k__l").order_by("pk")
expected_objs = [(None,)] * len(self.objs)
expected_objs[4] = ("m",)
self.assertSequenceEqual(qs, expected_objs)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_deep_distinct(self):
query = NullableJSONModel.objects.distinct("value__k__l").values_list(
"value__k__l"
)
self.assertSequenceEqual(query, [("m",), (None,)])
def test_isnull_key(self):
        # key__isnull=False works the same as has_key='key' (see the sketch
        # after this test).
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a__isnull=True),
self.objs[:3] + self.objs[5:],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__j__isnull=True),
self.objs[:4] + self.objs[5:],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a__isnull=False),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j__isnull=False),
[self.objs[4]],
)
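    # Illustrative sketch (not part of the original suite): the equivalence
    # noted in test_isnull_key can be asserted directly against has_key.
    def test_isnull_false_matches_has_key_sketch(self):
        self.assertCountEqual(
            NullableJSONModel.objects.filter(value__a__isnull=False),
            NullableJSONModel.objects.filter(value__has_key="a"),
        )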
def test_isnull_key_or_none(self):
obj = NullableJSONModel.objects.create(value={"a": None})
self.assertCountEqual(
NullableJSONModel.objects.filter(
Q(value__a__isnull=True) | Q(value__a=None)
),
self.objs[:3] + self.objs[5:] + [obj],
)
def test_none_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j=None),
[self.objs[4]],
)
def test_none_key_exclude(self):
obj = NullableJSONModel.objects.create(value={"j": 1})
if connection.vendor == "oracle":
            # The Oracle database can filter JSON objects on null-valued
            # keys, but Django's current Oracle backend doesn't support it.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None),
self.objs[1:4] + self.objs[5:] + [obj],
)
else:
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None), [obj]
)
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__0=1),
[self.objs[5]],
)
def test_shallow_obj_lookup(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a="b"),
[self.objs[3], self.objs[4]],
)
def test_obj_subquery_lookup(self):
qs = NullableJSONModel.objects.annotate(
field=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
).filter(field__a="b")
self.assertCountEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k__l="m"),
[self.objs[4]],
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k={"l": "m"}),
[self.objs[4]],
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__1__0=2),
[self.objs[5]],
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__1__f="g"),
[self.objs[4]],
)
def test_deep_lookup_transform(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__c__gt=2),
[self.objs[3], self.objs[4]],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__c__gt=2.33),
[self.objs[3], self.objs[4]],
)
self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
def test_lookup_exclude(self):
tests = [
(Q(value__a="b"), [self.objs[0]]),
(Q(value__foo="bax"), [self.objs[0], self.objs[7]]),
]
for condition, expected in tests:
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition),
expected,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(~condition),
expected,
)
def test_lookup_exclude_nonexistent_key(self):
# Values without the key are ignored.
condition = Q(value__foo="bax")
objs_with_value = [self.objs[6]]
objs_with_different_value = [self.objs[0], self.objs[7]]
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition),
objs_with_different_value,
)
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(~condition),
objs_with_value,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(condition | ~condition),
objs_with_value + objs_with_different_value,
)
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & ~condition),
objs_with_value + objs_with_different_value,
)
# Add the __isnull lookup to get an exhaustive set.
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
self.objs[0:6] + self.objs[7:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
objs_with_value,
)
def test_usage_in_subquery(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(
id__in=NullableJSONModel.objects.filter(value__c=14),
),
self.objs[3:5],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_array_key_contains(self):
tests = [
([], [self.objs[7]]),
("bar", [self.objs[7]]),
(["bar"], [self.objs[7]]),
("ar", []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__bar__contains=value),
expected,
)
def test_key_iexact(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact="BaR").exists(), True
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False
)
def test_key_in(self):
tests = [
("value__c__in", [14], self.objs[3:5]),
("value__c__in", [14, 15], self.objs[3:5]),
("value__0__in", [1], [self.objs[5]]),
("value__0__in", [1, 3], [self.objs[5]]),
("value__foo__in", ["bar"], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value"))],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo")], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value")), "baz"],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo"), "baz"], [self.objs[7]]),
("value__foo__in", ["bar", "baz"], [self.objs[7]]),
("value__bar__in", [["foo", "bar"]], [self.objs[7]]),
("value__bar__in", [["foo", "bar"], ["a"]], [self.objs[7]]),
("value__bax__in", [{"foo": "bar"}, {"a": "b"}], [self.objs[7]]),
("value__h__in", [True, "foo"], [self.objs[4]]),
("value__i__in", [False, "foo"], [self.objs[4]]),
]
for lookup, value, expected in tests:
with self.subTest(lookup=lookup, value=value):
self.assertCountEqual(
NullableJSONModel.objects.filter(**{lookup: value}),
expected,
)
def test_key_values(self):
qs = NullableJSONModel.objects.filter(value__h=True)
tests = [
("value__a", "b"),
("value__c", 14),
("value__d", ["e", {"f": "g"}]),
("value__h", True),
("value__i", False),
("value__j", None),
("value__k", {"l": "m"}),
("value__n", [None, True, False]),
("value__p", 4.2),
("value__r", {"s": True, "t": False}),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
def test_key_values_boolean(self):
qs = NullableJSONModel.objects.filter(value__h=True, value__i=False)
tests = [
("value__h", True),
("value__i", False),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertIs(qs.values_list(lookup, flat=True).get(), expected)
@skipUnlessDBFeature("supports_json_field_contains")
def test_key_contains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="ar").exists(), False
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="bar").exists(), True
)
def test_key_icontains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__icontains="Ar").exists(), True
)
def test_key_startswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__startswith="b").exists(), True
)
def test_key_istartswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__istartswith="B").exists(), True
)
def test_key_endswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__endswith="r").exists(), True
)
def test_key_iendswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iendswith="R").exists(), True
)
def test_key_regex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__regex=r"^bar$").exists(), True
)
def test_key_iregex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iregex=r"^bAr$").exists(), True
)
def test_key_quoted_string(self):
self.assertEqual(
NullableJSONModel.objects.filter(value__o='"quoted"').get(),
self.objs[4],
)
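    # Illustrative sketch (not part of the original suite): the stored JSON
    # string for key "o" includes the literal quote characters, so an
    # unquoted needle must not match exactly.
    def test_key_quoted_string_exact_sketch(self):
        self.assertIs(
            NullableJSONModel.objects.filter(value__o="quoted").exists(),
            False,
        )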
@skipUnlessDBFeature("has_json_operators")
def test_key_sql_injection(self):
with CaptureQueriesContext(connection) as queries:
self.assertIs(
NullableJSONModel.objects.filter(
**{
"""value__test' = '"a"') OR 1 = 1 OR ('d""": "x",
}
).exists(),
False,
)
self.assertIn(
"""."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
queries[0]["sql"],
)
@skipIfDBFeature("has_json_operators")
def test_key_sql_injection_escape(self):
query = str(
JSONModel.objects.filter(
**{
"""value__test") = '"a"' OR 1 = 1 OR ("d""": "x",
}
).query
)
self.assertIn('"test\\"', query)
self.assertIn('\\"d', query)
def test_key_escape(self):
obj = NullableJSONModel.objects.create(value={"%total": 10})
self.assertEqual(
NullableJSONModel.objects.filter(**{"value__%total": 10}).get(), obj
)
def test_none_key_and_exact_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a="b", value__j=None),
[self.objs[4]],
)
def test_lookups_with_key_transform(self):
tests = (
("value__baz__has_key", "c"),
("value__baz__has_keys", ["a", "c"]),
("value__baz__has_any_keys", ["a", "x"]),
("value__has_key", KeyTextTransform("foo", "value")),
)
for lookup, value in tests:
with self.subTest(lookup=lookup):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains_contained_by_with_key_transform(self):
tests = [
("value__d__contains", "e"),
("value__d__contains", [{"f": "g"}]),
("value__contains", KeyTransform("bax", "value")),
("value__contains", F("value__bax")),
("value__baz__contains", {"a": "b"}),
("value__baz__contained_by", {"a": "b", "c": "d", "e": "f"}),
(
"value__contained_by",
KeyTransform(
"x",
RawSQL(
self.raw_sql,
['{"x": {"a": "b", "c": 1, "d": "e"}}'],
),
),
),
]
# For databases where {'f': 'g'} (without surrounding []) matches
# [{'f': 'g'}].
if not connection.features.json_key_contains_list_matching_requires_list:
tests.append(("value__d__contains", {"f": "g"}))
for lookup, value in tests:
with self.subTest(lookup=lookup, value=value):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
def test_join_key_transform_annotation_expression(self):
related_obj = RelatedJSONModel.objects.create(
value={"d": ["f", "e"]},
json_model=self.objs[4],
)
RelatedJSONModel.objects.create(
value={"d": ["e", "f"]},
json_model=self.objs[4],
)
self.assertSequenceEqual(
RelatedJSONModel.objects.annotate(
key=F("value__d"),
related_key=F("json_model__value__d"),
chain=F("key__1"),
expr=Cast("key", models.JSONField()),
).filter(chain=F("related_key__0")),
[related_obj],
)
|
0bed8f1352fcc060d730af79942f0bdb61cd3f34a743b55bfcce4fb0edad132f | from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet, prefetch_related_objects
from django.db.models.query import get_prefetcher
from django.db.models.sql import Query
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
Article,
Author,
Author2,
AuthorAddress,
AuthorWithAge,
Bio,
Book,
Bookmark,
BookReview,
BookWithYear,
Comment,
Department,
Employee,
FavoriteAuthors,
House,
LessonEntry,
ModelIterableSubclass,
Person,
Qualification,
Reader,
Room,
TaggedItem,
Teacher,
WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title="Poems")
cls.book2 = Book.objects.create(title="Jane Eyre")
cls.book3 = Book.objects.create(title="Wuthering Heights")
cls.book4 = Book.objects.create(title="Sense and Sensibility")
cls.author1 = Author.objects.create(name="Charlotte", first_book=cls.book1)
cls.author2 = Author.objects.create(name="Anne", first_book=cls.book1)
cls.author3 = Author.objects.create(name="Emily", first_book=cls.book1)
cls.author4 = Author.objects.create(name="Jane", first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name="Amy")
cls.reader2 = Reader.objects.create(name="Belinda")
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index("WHERE")
self.assertEqual(
sql.count(str(needle), where_idx),
1,
msg="WHERE clause doesn't contain %s, actual SQL: %s"
% (needle, sql[where_idx:]),
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [
list(b.authors.all()) for b in Book.objects.prefetch_related("authors")
]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [
list(a.books.all()) for a in Author.objects.prefetch_related("books")
]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [
a.first_book for a in Author.objects.prefetch_related("first_book")
]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[
list(b.first_time_authors.all())
for b in Book.objects.prefetch_related("first_time_authors")
]
self.assertSequenceEqual(self.book2.authors.all(), [self.author1])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related("bookwithyear").all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create(
[
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
]
)
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related("bio")
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[
list(b.first_time_authors.all())
for b in Book.objects.prefetch_related("first_time_authors").exclude(
id=1000
)
]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related("first_time_authors")
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related("books")
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
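    # Illustrative sketch (not part of the original suite): querysets are
    # immutable, so prefetch_related(None) returns a cleared clone while the
    # original queryset keeps its prefetch and still runs only two queries.
    def test_clear_returns_clone_sketch(self):
        with_prefetch = Author.objects.prefetch_related("books")
        with_prefetch.prefetch_related(None)  # Clone; original is unchanged.
        with self.assertNumQueries(2):
            [list(a.books.all()) for a in with_prefetch]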
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books__read_by")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books", "books__read_by")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books__read_by", "books")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
        # A two-level prefetch ('books__read_by') is needed so that .get()
        # exercises the prefetch behavior.
with self.assertNumQueries(3):
author = Author.objects.prefetch_related("books__read_by").get(
name="Charlotte"
)
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related("first_book").prefetch_related(
"first_book__read_by"
)
lists = [[str(r) for r in a.first_book.read_by.all()] for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related("bio__books").select_related("bio")
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.prefetch_related("books_read__xyz")
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn("prefetch_related", str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related("authors__name")
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn("prefetch_related", str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_prefetch_eq(self):
prefetch_1 = Prefetch("authors", queryset=Author.objects.all())
prefetch_2 = Prefetch("books", queryset=Book.objects.all())
self.assertEqual(prefetch_1, prefetch_1)
self.assertEqual(prefetch_1, mock.ANY)
self.assertNotEqual(prefetch_1, prefetch_2)
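    # Illustrative sketch (assumption, not part of the original suite:
    # Prefetch equality considers only the prefetch path, not the attached
    # queryset).
    def test_prefetch_eq_ignores_queryset_sketch(self):
        self.assertEqual(
            Prefetch("authors"),
            Prefetch("authors", queryset=Author.objects.none()),
        )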
def test_forward_m2m_to_attr_conflict(self):
msg = "to_attr=authors conflicts with a field on the Book model."
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(
Book.objects.prefetch_related(
Prefetch("authors", queryset=authors, to_attr="authors"),
)
)
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = "to_attr=books conflicts with a field on the Author model."
poems = Book.objects.filter(title="Poems")
with self.assertRaisesMessage(ValueError, msg):
list(
Author.objects.prefetch_related(
Prefetch("books", queryset=poems, to_attr="books"),
)
)
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
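    # Illustrative sketch (not part of the original suite): a to_attr that
    # doesn't clash with a model field stores the prefetched objects as a
    # plain list attribute instead of populating the manager.
    def test_m2m_to_attr_list_sketch(self):
        authors = list(
            Author.objects.prefetch_related(
                Prefetch("books", to_attr="prefetched_books"),
            )
        )
        self.assertIsInstance(authors[0].prefetched_books, list)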
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__addresses"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__favorite_authors"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__authorwithage"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.id)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred on m2m and
reverse m2o relations until necessary.
"""
add_q = Query.add_q
for relation in ["authors", "first_time_authors"]:
with self.subTest(relation=relation):
with mock.patch.object(
Query,
"add_q",
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(Book.objects.prefetch_related(relation))
self.assertEqual(add_q_mock.call_count, 1)
def test_named_values_list(self):
qs = Author.objects.prefetch_related("books")
self.assertCountEqual(
[value.name for value in qs.values_list("name", named=True)],
["Anne", "Charlotte", "Emily", "Jane"],
)
def test_m2m_prefetching_iterator_with_chunks(self):
with self.assertNumQueries(3):
authors = [
b.authors.first()
for b in Book.objects.prefetch_related("authors").iterator(chunk_size=2)
]
self.assertEqual(
authors,
[self.author1, self.author1, self.author3, self.author4],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_m2m_prefetching_iterator_without_chunks(self):
# prefetch_related() is ignored.
with self.assertNumQueries(5):
authors = [
b.authors.first()
for b in Book.objects.prefetch_related("authors").iterator()
]
self.assertEqual(
authors,
[self.author1, self.author1, self.author3, self.author4],
)
def test_m2m_prefetching_iterator_without_chunks_warning(self):
msg = (
"Using QuerySet.iterator() after prefetch_related() without "
"specifying chunk_size is deprecated."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Book.objects.prefetch_related("authors").iterator()
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,)
).prefetch_related("authors")
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(
book1.authors.all(), [self.author1, self.author2, self.author3]
)
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related("authors").raw(
"SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(
book1.authors.all(), [self.author1, self.author2, self.author3]
)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related("books")
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that returns a list of (obj, rel_objs) tuples for the
        objects in obj_iter. For each object, the given path is traversed
        recursively and the objects found along it are collected in rel_objs.
"""
ret_val = []
if hasattr(obj_iter, "all"):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name="Joe")
cls.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(
name="House 1", address="123 Main St", owner=cls.person1
)
cls.room1_1 = Room.objects.create(name="Dining room", house=cls.house1)
cls.room1_2 = Room.objects.create(name="Lounge", house=cls.house1)
cls.room1_3 = Room.objects.create(name="Kitchen", house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(
name="House 2", address="45 Side St", owner=cls.person1
)
cls.room2_1 = Room.objects.create(name="Dining room", house=cls.house2)
cls.room2_2 = Room.objects.create(name="Lounge", house=cls.house2)
cls.room2_3 = Room.objects.create(name="Kitchen", house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(
name="House 3", address="6 Downing St", owner=cls.person2
)
cls.room3_1 = Room.objects.create(name="Dining room", house=cls.house3)
cls.room3_2 = Room.objects.create(name="Lounge", house=cls.house3)
cls.room3_3 = Room.objects.create(name="Kitchen", house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(
name="house 4", address="7 Regents St", owner=cls.person2
)
cls.room4_1 = Room.objects.create(name="Dining room", house=cls.house4)
cls.room4_2 = Room.objects.create(name="Lounge", house=cls.house4)
cls.room4_3 = Room.objects.create(name="Kitchen", house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related("houses")
related_objs_normal = ([list(p.houses.all()) for p in qs],)
related_objs_from_traverse = [
[inner[0] for inner in o[1]] for o in self.traverse_qs(qs, [["houses"]])
]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all()),
),
[["houses", "rooms"]],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing
# houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses_lst__rooms",
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
),
),
[["houses", "rooms"]],
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related("houses__rooms", "houses"),
[["houses", "rooms"]],
)
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all(), to_attr="houses_lst"),
),
[["houses", "rooms"]],
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses"), [["houses"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses")), [["houses"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst")
),
[["houses_lst"]],
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related("occupants"), [["occupants"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch("occupants")), [["occupants"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(
Prefetch("occupants", to_attr="occupants_lst")
),
[["occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related("house__occupants"),
[["house", "occupants"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch("house__occupants")),
[["house", "occupants"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(
Prefetch("house__occupants", to_attr="occupants_lst")
),
[["house", "occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag="houses").prefetch_related(
"content_object__rooms"
),
[["content_object", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch("content_object"),
Prefetch("content_object__rooms", to_attr="rooms_lst"),
),
[["content_object", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses", "houses__rooms"),
[["houses", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses"), "houses__rooms"),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses"), Prefetch("houses__rooms")
),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"), "houses_lst__rooms"
),
[["houses_lst", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"),
Prefetch("houses_lst__rooms", to_attr="rooms_lst"),
),
[["houses_lst", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(
content_object=bookmark, favorite=bookmark, tag="python"
)
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related(
"tags", "tags__content_object", "favorite_tags"
),
[["tags", "content_object"], ["favorite_tags"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch("tags", to_attr="tags_lst"),
Prefetch("tags_lst__content_object"),
Prefetch("favorite_tags"),
),
[["tags_lst", "content_object"], ["favorite_tags"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
"primary_house__occupants__houses",
),
[["primary_house", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("primary_house__occupants", to_attr="occupants_lst"),
"primary_house__occupants_lst__houses",
),
[["primary_house", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
"all_houses__occupants__houses",
),
[["all_houses", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
Prefetch("all_houses__occupants", to_attr="occupants_lst"),
"all_houses__occupants_lst__houses",
),
[["all_houses", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related("houses"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses"]]),
self.traverse_qs(lst2, [["houses_lst"]]),
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=House.objects.filter(
pk__in=[self.house1.pk, self.house3.pk]
),
to_attr="houses_lst",
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__rooms"))
with self.assertNumQueries(3):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses__rooms",
queryset=Room.objects.all(),
to_attr="rooms_lst",
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "rooms"]]),
self.traverse_qs(lst2, [["houses", "rooms_lst"]]),
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__owner"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.select_related("owner"))
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "owner"]]),
self.traverse_qs(lst2, [["houses", "owner"]]),
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch("rooms", queryset=inner_rooms_qs, to_attr="rooms_lst")
)
with self.assertNumQueries(4):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=houses_qs_prf.filter(pk=self.house1.pk),
to_attr="houses_lst",
),
Prefetch("houses_lst__rooms_lst__main_room_of"),
)
)
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("house")
lst1 = self.traverse_qs(rooms, [["house", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(Prefetch("house", queryset=houses))
lst2 = self.traverse_qs(rooms, [["house", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related("owner")
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=houses, to_attr="house_attr")
)
lst2 = self.traverse_qs(rooms, [["house_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = Room.objects.prefetch_related(
Prefetch("house", queryset=houses.filter(address="DoesNotExist"))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "house")
room = Room.objects.prefetch_related(
Prefetch(
"house",
queryset=houses.filter(address="DoesNotExist"),
to_attr="house_attr",
)
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=House.objects.only("name"))
)
with self.assertNumQueries(2):
getattr(rooms.first().house, "name")
with self.assertNumQueries(3):
getattr(rooms.first().house, "address")
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("main_room_of")
lst1 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(
Prefetch("main_room_of", queryset=houses)
)
lst2 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses,
to_attr="main_room_of_attr",
)
)
)
lst2 = self.traverse_qs(rooms, [["main_room_of_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch("main_room_of", queryset=houses.filter(address="DoesNotExist"))
)
.first()
)
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "main_room_of")
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses.filter(address="DoesNotExist"),
to_attr="main_room_of_attr",
)
)
.first()
)
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.filter(name="House 1")),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch("rooms"))
persons = Person.objects.prefetch_related(Prefetch("houses", queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch("occupants", queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1,
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
        Nested prefetches whose names clash with descriptor names
        (Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch("houses", to_attr="some_attr_name"),
Prefetch("houses", queryset=House.objects.prefetch_related("main_room")),
)
houses = House.objects.prefetch_related(
Prefetch("occupants", queryset=occupants)
)
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [["occupants", "houses", "main_room"]])
def test_values_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values("pk"))
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values_list("pk"))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(
Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass
)
Prefetch("teachers", Teacher.objects_custom.all())
def test_raw_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.raw("select pk from house"))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch("rooms", queryset=Room.objects.all(), to_attr="to_rooms"),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch("houses", House.objects.all(), to_attr="cached_all_houses"),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
"add_q",
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(
House.objects.prefetch_related(
Prefetch("occupants", queryset=Person.objects.all())
)
)
self.assertEqual(add_q_mock.call_count, 1)
class DefaultManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.qual1 = Qualification.objects.create(name="BA")
cls.qual2 = Qualification.objects.create(name="BSci")
cls.qual3 = Qualification.objects.create(name="MA")
cls.qual4 = Qualification.objects.create(name="PhD")
cls.teacher1 = Teacher.objects.create(name="Mr Cleese")
cls.teacher2 = Teacher.objects.create(name="Mr Idle")
cls.teacher3 = Teacher.objects.create(name="Mr Chapman")
cls.teacher1.qualifications.add(cls.qual1, cls.qual2, cls.qual3, cls.qual4)
cls.teacher2.qualifications.add(cls.qual1)
cls.teacher3.qualifications.add(cls.qual2)
cls.dept1 = Department.objects.create(name="English")
cls.dept2 = Department.objects.create(name="Physics")
cls.dept1.teachers.add(cls.teacher1, cls.teacher2)
cls.dept2.teachers.add(cls.teacher1, cls.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related("teachers")
depts = "".join(
"%s department: %s\n"
% (dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs
)
self.assertEqual(
depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman "
"(BSci)\n",
)
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related("content_object")
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related("content_object")
[c.content_object for c in qs]
def test_prefetch_GFK_uuid_pk(self):
article = Article.objects.create(name="Django")
Comment.objects.create(comment="awesome", content_object_uuid=article)
qs = Comment.objects.prefetch_related("content_object_uuid")
self.assertEqual([c.content_object_uuid for c in qs], [article])
def test_prefetch_GFK_fk_pk(self):
book = Book.objects.create(title="Poems")
book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
Comment.objects.create(comment="awesome", content_object=book_with_year)
qs = Comment.objects.prefetch_related("content_object")
self.assertEqual([c.content_object for c in qs], [book_with_year])
def test_traverse_GFK(self):
"""
        A 'content_object' can be traversed with prefetch_related() to get
        to related objects on the other side (assuming the queryset is
        suitably filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(
content_type=ct, tag="awesome"
).prefetch_related("content_object__read_by")
readers_of_awesome_books = {
r.name for tag in qs for r in tag.content_object.read_by.all()
}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(
tag="awesome", content_object=self.book1, created_by=self.reader1
)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [
t.created_by for t in TaggedItem.objects.prefetch_related("created_by")
]
self.assertEqual(result, [t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(content_object=bookmark, tag="python")
with self.assertNumQueries(2):
tags = [
t.tag
for b in Bookmark.objects.prefetch_related("tags")
for t in b.tags.all()
]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=b, tag="django")
TaggedItem.objects.create(content_object=b, favorite=b, tag="python")
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related(
"tags", "favorite_tags"
)[0]
self.assertEqual(
sorted(i.tag for i in bookmark.tags.all()), ["django", "python"]
)
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
django_tag = TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(content_object=bookmark, tag="python")
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch("tags", TaggedItem.objects.filter(tag="django")),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
def test_deleted_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
ct = ContentType.objects.get_for_model(Book)
book1_pk = self.book1.pk
self.book1.delete()
with self.assertNumQueries(2):
qs = TaggedItem.objects.filter(tag="awesome").prefetch_related(
"content_object"
)
result = [
(tag.object_id, tag.content_type_id, tag.content_object) for tag in qs
]
self.assertEqual(
result,
[
(book1_pk, ct.pk, None),
(self.book2.pk, ct.pk, self.book2),
],
)
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title="Poems", published_year=2010)
cls.book2 = BookWithYear.objects.create(title="More poems", published_year=2011)
cls.author1 = AuthorWithAge.objects.create(
name="Jane", first_book=cls.book1, age=50
)
cls.author2 = AuthorWithAge.objects.create(
name="Tom", first_book=cls.book1, age=49
)
cls.author3 = AuthorWithAge.objects.create(
name="Robert", first_book=cls.book2, age=48
)
cls.author_address = AuthorAddress.objects.create(
author=cls.author1, address="SomeStreet 1"
)
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes="review book1")
cls.br2 = BookReview.objects.create(book=cls.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related("addresses")
addresses = [
[str(address) for address in obj.addresses.all()] for obj in qs
]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related("book")
titles = [obj.book.title for obj in qs]
self.assertCountEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related("books_with_year")
with self.assertNumQueries(2):
lst = [
[str(book) for book in author.books_with_year.all()] for author in qs
]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related("aged_authors")
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related("author")]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [
a.authorwithage
for a in Author.objects.prefetch_related("authorwithage")
]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn("authorwithage", connection.queries[-1]["sql"].lower())
self.assertIn(" IN ", connection.queries[-1]["sql"])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title="Poems")
cls.author1 = Author.objects.create(name="Jane", first_book=cls.book)
cls.author2 = Author.objects.create(name="Tom", first_book=cls.book)
cls.author3 = Author.objects.create(name="Robert", first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(
author=cls.author1, address="SomeStreet 1"
)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related("addresses")
addresses = [
[str(address) for address in obj.addresses.all()] for obj in qs
]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("favorite_authors", "favors_me")
favorites = [
(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()],
)
for author in qs
]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)]),
],
)
def test_m2m_manager_reused(self):
author = Author.objects.prefetch_related(
"favorite_authors",
"favors_me",
).first()
self.assertIs(author.favorite_authors, author.favorite_authors)
self.assertIs(author.favors_me, author.favors_me)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
@classmethod
def setUpTestData(cls):
person1 = Person.objects.create(name="Joe")
person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house1 = House.objects.create(address="123 Main St")
room1_1 = Room.objects.create(name="Dining room", house=house1)
Room.objects.create(name="Lounge", house=house1)
Room.objects.create(name="Kitchen", house=house1)
house1.main_room = room1_1
house1.save()
person1.houses.add(house1)
house2 = House.objects.create(address="45 Side St")
room2_1 = Room.objects.create(name="Dining room", house=house2)
Room.objects.create(name="Lounge", house=house2)
house2.main_room = room2_1
house2.save()
person1.houses.add(house2)
house3 = House.objects.create(address="6 Downing St")
room3_1 = Room.objects.create(name="Dining room", house=house3)
Room.objects.create(name="Lounge", house=house3)
Room.objects.create(name="Kitchen", house=house3)
house3.main_room = room3_1
house3.save()
person2.houses.add(house3)
house4 = House.objects.create(address="7 Regents St")
room4_1 = Room.objects.create(name="Dining room", house=house4)
Room.objects.create(name="Lounge", house=house4)
house4.main_room = room4_1
house4.save()
person2.houses.add(house4)
def test_order(self):
with self.assertNumQueries(4):
            # The following two lookups must be done in the same order as
            # written; otherwise 'primary_house' will cause non-prefetched
            # lookups.
qs = Person.objects.prefetch_related(
"houses__rooms", "primary_house__occupants"
)
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
        # Because we use select_related() for 'boss', it doesn't need to be
        # prefetched, but we can still traverse it although it contains some
        # nulls.
with self.assertNumQueries(2):
qs = Employee.objects.select_related("boss").prefetch_related("boss__serfs")
co_serfs = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs
]
qs2 = Employee.objects.select_related("boss")
co_serfs2 = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2
]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related("boss__serfs")
co_serfs = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs
]
qs2 = Employee.objects.all()
co_serfs2 = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2
]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() correctly prefetches objects by not using .iterator()
directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related("serfs").in_bulk(
[boss1.pk, boss2.pk]
)
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
databases = {"default", "other"}
def test_using_is_honored_m2m(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related("authors")
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1
)
self.assertEqual(
books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n",
)
# Reverse
qs2 = A.prefetch_related("books")
with self.assertNumQueries(2, using="other"):
authors = "".join(
"%s: %s\n"
% (author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2
)
self.assertEqual(
authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n",
)
def test_using_is_honored_fkey(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using="other"):
books = ", ".join(
a.first_book.title for a in A.prefetch_related("first_book")
)
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related("first_time_authors")
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using("other")
A = AuthorWithAge.objects.using("other")
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name="Jane", first_book=book1, age=50)
A.create(name="Tom", first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using="other"):
authors = ", ".join(a.author.name for a in A.prefetch_related("author"))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using="other"):
ages = ", ".join(
str(a.authorwithage.age) for a in A.prefetch_related("authorwithage")
)
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch("first_time_authors", queryset=Author.objects.all())
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on the same db.
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("other")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on a different db.
with self.assertNumQueries(1, using="default"), self.assertNumQueries(
1, using="other"
):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("default")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(books, "Poems ()\n" "Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
@classmethod
def setUpTestData(cls):
LessonEntry.objects.bulk_create(
LessonEntry(id=id_, name1=name1, name2=name2)
for id_, name1, name2 in [
(1, "einfach", "simple"),
(2, "schwierig", "difficult"),
]
)
WordEntry.objects.bulk_create(
WordEntry(id=id_, lesson_entry_id=lesson_entry_id, name=name)
for id_, lesson_entry_id, name in [
(1, 1, "einfach"),
(2, 1, "simple"),
(3, 2, "schwierig"),
(4, 2, "difficult"),
]
)
def test_bug(self):
list(
WordEntry.objects.prefetch_related(
"lesson_entry", "lesson_entry__wordentry_set"
)
)
class Ticket21410Tests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Poems")
book2 = Book.objects.create(title="Jane Eyre")
book3 = Book.objects.create(title="Wuthering Heights")
book4 = Book.objects.create(title="Sense and Sensibility")
author1 = Author2.objects.create(name="Charlotte", first_book=book1)
author2 = Author2.objects.create(name="Anne", first_book=book1)
author3 = Author2.objects.create(name="Emily", first_book=book1)
author4 = Author2.objects.create(name="Jane", first_book=book4)
author1.favorite_books.add(book1, book2, book3)
author2.favorite_books.add(book1)
author3.favorite_books.add(book2)
author4.favorite_books.add(book3)
def test_bug(self):
list(Author2.objects.prefetch_related("first_book", "favorite_books"))
class Ticket21760Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
cls.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = cls.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], "house", "house")[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(" JOIN ", str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
    has been fetched [1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title="book1"),
Book.objects.create(title="book2"),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name="Author11"),
Author.objects.create(first_book=cls.book1, name="Author12"),
Author.objects.create(first_book=cls.book2, name="Author21"),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address="Happy place"),
AuthorAddress.objects.create(author=cls.author12, address="Haunted house"),
AuthorAddress.objects.create(author=cls.author21, address="Happy place"),
]
cls.bookwithyear1 = BookWithYear.objects.create(
title="Poems", published_year=2010
)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"],).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(
book1.first_time_authors.all(), [self.author11, self.author12]
)
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(
book1.first_time_authors.all()[0].addresses.all(),
[self.author1_address1],
)
self.assertSequenceEqual(
book1.first_time_authors.all()[1].addresses.all(), []
)
self.assertSequenceEqual(
book2.first_time_authors.all()[0].addresses.all(),
[self.author2_address1],
)
self.assertEqual(
list(book1.first_time_authors.all()),
list(book1.first_time_authors.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()),
list(book2.first_time_authors.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all()),
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"],).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
to_attr="happy_place",
)
),
to_attr="first_authors",
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(
book1.first_authors[0].happy_place, [self.author1_address1]
)
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(
book2.first_authors[0].happy_place, [self.author2_address1]
)
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
(bookwithyear1,) = BookWithYear.objects.prefetch_related("bookreview_set")
with self.assertNumQueries(0):
self.assertCountEqual(
bookwithyear1.bookreview_set.all(), [self.bookreview1]
)
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], "bookreview_set")
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(
bookwithyear.bookreview_set.all(), [self.bookreview1, new_review]
)
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title="Les confessions Volume I")
cls.book2 = Book.objects.create(title="Candide")
cls.author1 = AuthorWithAge.objects.create(
name="Rousseau", first_book=cls.book1, age=70
)
cls.author2 = AuthorWithAge.objects.create(
name="Voltaire", first_book=cls.book2, age=65
)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
"author",
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch("favorite_authors__first_book"),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertSequenceEqual(authors, [self.author1, self.author2])
class NestedPrefetchTests(TestCase):
@classmethod
def setUpTestData(cls):
house = House.objects.create(name="Big house", address="123 Main St")
cls.room = Room.objects.create(name="Kitchen", house=house)
def test_nested_prefetch_is_not_overwritten_by_related_object(self):
"""
        When prefetching a set of child objects related to a set of parent
        objects, and the child queryset itself specifies a prefetch back to
        the parent, the prefetched relationship is used rather than populating
        the reverse relationship from the parent.
"""
queryset = House.objects.only("name").prefetch_related(
Prefetch(
"rooms",
queryset=Room.objects.prefetch_related(
Prefetch("house", queryset=House.objects.only("address")),
),
),
)
with self.assertNumQueries(3):
house = queryset.first()
self.assertIs(Room.house.is_cached(self.room), True)
with self.assertNumQueries(0):
house.rooms.first().house.address
class PrefetchLimitTests(TestDataMixin, TestCase):
def test_m2m_forward(self):
authors = Author.objects.all() # Meta.ordering
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch("authors", authors),
Prefetch("authors", authors[1:], to_attr="authors_sliced"),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(book.authors_sliced, list(book.authors.all())[1:])
def test_m2m_reverse(self):
books = Book.objects.order_by("title")
with self.assertNumQueries(3):
authors = list(
Author.objects.prefetch_related(
Prefetch("books", books),
Prefetch("books", books[1:2], to_attr="books_sliced"),
)
)
for author in authors:
with self.subTest(author=author):
self.assertEqual(author.books_sliced, list(author.books.all())[1:2])
def test_foreignkey_reverse(self):
authors = Author.objects.order_by("-name")
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch(
"first_time_authors",
authors,
),
Prefetch(
"first_time_authors",
authors[1:],
to_attr="first_time_authors_sliced",
),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(
book.first_time_authors_sliced,
list(book.first_time_authors.all())[1:],
)
def test_reverse_ordering(self):
authors = Author.objects.reverse() # Reverse Meta.ordering
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch("authors", authors),
Prefetch("authors", authors[1:], to_attr="authors_sliced"),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(book.authors_sliced, list(book.authors.all())[1:])
# Unittests for fixtures.
import json
import os
import re
from io import StringIO
from pathlib import Path
from django.core import management, serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers.base import DeserializationError
from django.db import IntegrityError, transaction
from django.db.models import signals
from django.test import (
TestCase,
TransactionTestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
Absolute,
Animal,
Article,
Book,
Child,
Circle1,
Circle2,
Circle3,
ExternalDependency,
M2MCircular1ThroughAB,
M2MCircular1ThroughBC,
M2MCircular1ThroughCA,
M2MCircular2ThroughAB,
M2MComplexA,
M2MComplexB,
M2MComplexCircular1A,
M2MComplexCircular1B,
M2MComplexCircular1C,
M2MComplexCircular2A,
M2MComplexCircular2B,
M2MSimpleA,
M2MSimpleB,
M2MSimpleCircularA,
M2MSimpleCircularB,
M2MThroughAB,
NaturalKeyWithFKDependency,
NKChild,
Parent,
Person,
RefToNKChild,
Store,
Stuff,
Thingy,
Widget,
)
_cur_dir = os.path.dirname(os.path.abspath(__file__))
class TestFixtures(TestCase):
def animal_pre_save_check(self, signal, sender, instance, **kwargs):
self.pre_save_checks.append(
(
"Count = %s (%s)" % (instance.count, type(instance.count)),
"Weight = %s (%s)" % (instance.weight, type(instance.weight)),
)
)
def test_duplicate_pk(self):
"""
This is a regression test for ticket #3790.
"""
# Load a fixture that uses PK=1
management.call_command(
"loaddata",
"sequence",
verbosity=0,
)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
animal = Animal(
name="Platypus",
latin_name="Ornithorhynchus anatinus",
count=2,
weight=2.2,
)
animal.save()
self.assertGreater(animal.id, 1)
def test_loaddata_not_found_fields_not_ignore(self):
"""
        Test for ticket #9279 -- An error is raised for entries in
the serialized data for fields that have been removed
from the database when not ignored.
"""
with self.assertRaises(DeserializationError):
management.call_command(
"loaddata",
"sequence_extra",
verbosity=0,
)
def test_loaddata_not_found_fields_ignore(self):
"""
Test for ticket #9279 -- Ignores entries in
the serialized data for fields that have been removed
from the database.
"""
management.call_command(
"loaddata",
"sequence_extra",
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, "Lion")
def test_loaddata_not_found_fields_ignore_xml(self):
"""
Test for ticket #19998 -- Ignore entries in the XML serialized data
for fields that have been removed from the model definition.
"""
management.call_command(
"loaddata",
"sequence_extra_xml",
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, "Wolf")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_pretty_print_xml(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
"loaddata",
"pretty.xml",
verbosity=0,
)
self.assertIsNone(Stuff.objects.all()[0].name)
self.assertIsNone(Stuff.objects.all()[0].owner)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_pretty_print_xml_empty_strings(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
"loaddata",
"pretty.xml",
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, "")
self.assertIsNone(Stuff.objects.all()[0].owner)
def test_absolute_path(self):
"""
Regression test for ticket #6436 --
os.path.join will throw away the initial parts of a path if it
encounters an absolute path.
This means that if a fixture is specified as an absolute path,
we need to make sure we don't discover the absolute path in every
fixture directory.
"""
load_absolute_path = os.path.join(
os.path.dirname(__file__), "fixtures", "absolute.json"
)
management.call_command(
"loaddata",
load_absolute_path,
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_relative_path(self, path=["fixtures", "absolute.json"]):
relative_path = os.path.join(*path)
cwd = os.getcwd()
try:
os.chdir(_cur_dir)
management.call_command(
"loaddata",
relative_path,
verbosity=0,
)
finally:
os.chdir(cwd)
self.assertEqual(Absolute.objects.count(), 1)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, "fixtures_1")])
def test_relative_path_in_fixture_dirs(self):
self.test_relative_path(path=["inner", "absolute.json"])
def test_path_containing_dots(self):
management.call_command(
"loaddata",
"path.containing.dots.json",
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_unknown_format(self):
"""
        Test for ticket #4371 -- Loading data of an unknown format should fail.
        Validate that error conditions are caught correctly.
"""
msg = (
"Problem installing fixture 'bad_fix.ture1': unkn is not a known "
"serialization format."
)
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command(
"loaddata",
"bad_fix.ture1.unkn",
verbosity=0,
)
@override_settings(SERIALIZATION_MODULES={"unkn": "unexistent.path"})
def test_unimportable_serializer(self):
"""
        A failing serializer import raises the proper error.
"""
with self.assertRaisesMessage(ImportError, "No module named 'unexistent'"):
management.call_command(
"loaddata",
"bad_fix.ture1.unkn",
verbosity=0,
)
def test_invalid_data(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
using explicit filename.
Test for ticket #18213 -- warning conditions are caught correctly
"""
msg = "No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
with self.assertWarnsMessage(RuntimeWarning, msg):
management.call_command(
"loaddata",
"bad_fixture2.xml",
verbosity=0,
)
def test_invalid_data_no_ext(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
without file extension.
Test for ticket #18213 -- warning conditions are caught correctly
"""
msg = "No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
with self.assertWarnsMessage(RuntimeWarning, msg):
management.call_command(
"loaddata",
"bad_fixture2",
verbosity=0,
)
def test_empty(self):
"""
        Test for ticket #18213 -- Loading a fixture file with no data outputs
        a warning. Previously, an empty fixture raised an error (see ticket
        #4371).
"""
msg = "No fixture data found for 'empty'. (File format may be invalid.)"
with self.assertWarnsMessage(RuntimeWarning, msg):
management.call_command(
"loaddata",
"empty",
verbosity=0,
)
def test_error_message(self):
"""
Regression for #9011 - error message is correct.
Change from error to warning for ticket #18213.
"""
msg = "No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
with self.assertWarnsMessage(RuntimeWarning, msg):
management.call_command(
"loaddata",
"bad_fixture2",
"animal",
verbosity=0,
)
def test_pg_sequence_resetting_checks(self):
"""
Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
ascend to parent models when inheritance is used
(since they are treated individually).
"""
management.call_command(
"loaddata",
"model-inheritance.json",
verbosity=0,
)
self.assertEqual(Parent.objects.all()[0].id, 1)
self.assertEqual(Child.objects.all()[0].id, 1)
def test_close_connection_after_loaddata(self):
"""
Test for ticket #7572 -- MySQL has a problem if the same connection is
used to create tables, load data, and then query over that data.
To compensate, we close the connection after running loaddata.
This ensures that a new connection is opened when test queries are
issued.
"""
management.call_command(
"loaddata",
"big-fixture.json",
verbosity=0,
)
articles = Article.objects.exclude(id=9)
self.assertEqual(
list(articles.values_list("id", flat=True)), [1, 2, 3, 4, 5, 6, 7, 8]
)
# Just for good measure, run the same query again.
# Under the influence of ticket #7572, this will
# give a different result to the previous call.
self.assertEqual(
list(articles.values_list("id", flat=True)), [1, 2, 3, 4, 5, 6, 7, 8]
)
def test_field_value_coerce(self):
"""
Test for tickets #8298, #9942 - Field values should be coerced into the
correct type by the deserializer, not as part of the database write.
"""
self.pre_save_checks = []
signals.pre_save.connect(self.animal_pre_save_check)
try:
management.call_command(
"loaddata",
"animal.xml",
verbosity=0,
)
self.assertEqual(
self.pre_save_checks,
[("Count = 42 (<class 'int'>)", "Weight = 1.2 (<class 'float'>)")],
)
finally:
signals.pre_save.disconnect(self.animal_pre_save_check)
def test_dumpdata_uses_default_manager(self):
"""
Regression for #11286 -- dumpdata honors the default manager. Dump the
current contents of the database as a JSON fixture.
"""
management.call_command(
"loaddata",
"animal.xml",
verbosity=0,
)
management.call_command(
"loaddata",
"sequence.json",
verbosity=0,
)
animal = Animal(
name="Platypus",
latin_name="Ornithorhynchus anatinus",
count=2,
weight=2.2,
)
animal.save()
out = StringIO()
management.call_command(
"dumpdata",
"fixtures_regress.animal",
format="json",
stdout=out,
)
# Output order isn't guaranteed, so check for parts
data = out.getvalue()
# Get rid of artifacts like '000000002' to eliminate the differences
# between different Python versions.
data = re.sub("0{6,}[0-9]", "", data)
animals_data = sorted(
[
{
"pk": 1,
"model": "fixtures_regress.animal",
"fields": {
"count": 3,
"weight": 1.2,
"name": "Lion",
"latin_name": "Panthera leo",
},
},
{
"pk": 10,
"model": "fixtures_regress.animal",
"fields": {
"count": 42,
"weight": 1.2,
"name": "Emu",
"latin_name": "Dromaius novaehollandiae",
},
},
{
"pk": animal.pk,
"model": "fixtures_regress.animal",
"fields": {
"count": 2,
"weight": 2.2,
"name": "Platypus",
"latin_name": "Ornithorhynchus anatinus",
},
},
],
key=lambda x: x["pk"],
)
data = sorted(json.loads(data), key=lambda x: x["pk"])
self.maxDiff = 1024
self.assertEqual(data, animals_data)
def test_proxy_model_included(self):
"""
Regression for #11428 - Proxy models aren't included when you dumpdata
"""
out = StringIO()
# Create an instance of the concrete class
widget = Widget.objects.create(name="grommet")
management.call_command(
"dumpdata",
"fixtures_regress.widget",
"fixtures_regress.widgetproxy",
format="json",
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
'[{"pk": %d, "model": "fixtures_regress.widget", '
'"fields": {"name": "grommet"}}]' % widget.pk,
)
@skipUnlessDBFeature("supports_forward_references")
def test_loaddata_works_when_fixture_has_forward_refs(self):
"""
Forward references cause fixtures not to load in MySQL (InnoDB).
"""
management.call_command(
"loaddata",
"forward_ref.json",
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
"""
Data with nonexistent child key references raises an error.
"""
with self.assertRaisesMessage(IntegrityError, "Problem installing fixture"):
management.call_command(
"loaddata",
"forward_ref_bad_data.json",
verbosity=0,
)
@skipUnlessDBFeature("supports_forward_references")
@override_settings(
FIXTURE_DIRS=[
os.path.join(_cur_dir, "fixtures_1"),
os.path.join(_cur_dir, "fixtures_2"),
]
)
def test_loaddata_forward_refs_split_fixtures(self):
"""
Regression for #17530 - should be able to cope with forward references
when the fixtures are not in the same files or directories.
"""
management.call_command(
"loaddata",
"forward_ref_1.json",
"forward_ref_2.json",
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_no_fixture_specified(self):
"""
An error is quickly reported when no fixtures are provided on the
command line.
"""
msg = (
"No database fixture specified. Please provide the path of at least one "
"fixture in the command line."
)
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command(
"loaddata",
verbosity=0,
)
def test_ticket_20820(self):
"""
Regression for ticket #20820 -- loaddata on a model that inherits
from a model with a M2M shouldn't blow up.
"""
management.call_command(
"loaddata",
"special-article.json",
verbosity=0,
)
def test_ticket_22421(self):
"""
Regression for ticket #22421 -- loaddata on a model that inherits from
a grand-parent model with a M2M but via an abstract parent shouldn't
blow up.
"""
management.call_command(
"loaddata",
"feature.json",
verbosity=0,
)
def test_loaddata_with_m2m_to_self(self):
"""
Regression test for ticket #17946.
"""
management.call_command(
"loaddata",
"m2mtoself.json",
verbosity=0,
)
@override_settings(
FIXTURE_DIRS=[
os.path.join(_cur_dir, "fixtures_1"),
os.path.join(_cur_dir, "fixtures_1"),
]
)
def test_fixture_dirs_with_duplicates(self):
"""
settings.FIXTURE_DIRS cannot contain duplicates in order to avoid
repeated fixture loading.
"""
with self.assertRaisesMessage(
ImproperlyConfigured, "settings.FIXTURE_DIRS contains duplicates."
):
management.call_command("loaddata", "absolute.json", verbosity=0)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, "fixtures")])
def test_fixture_dirs_with_default_fixture_path(self):
"""
settings.FIXTURE_DIRS cannot contain a default fixtures directory
for an application (app/fixtures), in order to avoid repeated fixture
loading.
"""
msg = (
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS."
% (os.path.join(_cur_dir, "fixtures"), "fixtures_regress")
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
management.call_command("loaddata", "absolute.json", verbosity=0)
@override_settings(FIXTURE_DIRS=[Path(_cur_dir) / "fixtures"])
def test_fixture_dirs_with_default_fixture_path_as_pathlib(self):
"""
settings.FIXTURE_DIRS cannot contain a default fixtures directory
for an application (app/fixtures), in order to avoid repeated fixture
loading.
"""
msg = (
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS."
% (os.path.join(_cur_dir, "fixtures"), "fixtures_regress")
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
management.call_command("loaddata", "absolute.json", verbosity=0)
@override_settings(
FIXTURE_DIRS=[
os.path.join(_cur_dir, "fixtures_1"),
os.path.join(_cur_dir, "fixtures_2"),
]
)
def test_loaddata_with_valid_fixture_dirs(self):
management.call_command(
"loaddata",
"absolute.json",
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[Path(_cur_dir) / "fixtures_1"])
def test_fixtures_dir_pathlib(self):
management.call_command("loaddata", "inner/absolute.json", verbosity=0)
self.assertQuerysetEqual(Absolute.objects.all(), [1], transform=lambda o: o.pk)
class NaturalKeyFixtureTests(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 -- Python-based parser version:
natural keys deserialize with a FK to an inheriting model.
"""
management.call_command(
"loaddata",
"model-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance.json",
verbosity=0,
)
self.assertEqual(NKChild.objects.get(pk=1).data, "apple")
self.assertEqual(RefToNKChild.objects.get(pk=1).nk_fk.data, "apple")
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 -- XML version:
natural keys deserialize with a FK to an inheriting model.
"""
management.call_command(
"loaddata",
"model-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance2.xml",
verbosity=0,
)
self.assertEqual(NKChild.objects.get(pk=2).data, "banana")
self.assertEqual(RefToNKChild.objects.get(pk=2).nk_fk.data, "apple")
def test_nk_on_serialize(self):
"""
Natural key requirements are taken into account when serializing models.
"""
management.call_command(
"loaddata",
"forward_ref_lookup.json",
verbosity=0,
)
out = StringIO()
management.call_command(
"dumpdata",
"fixtures_regress.book",
"fixtures_regress.person",
"fixtures_regress.store",
verbosity=0,
format="json",
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""
[{"fields": {"main": null, "name": "Amazon"},
"model": "fixtures_regress.store"},
{"fields": {"main": null, "name": "Borders"},
"model": "fixtures_regress.store"},
{"fields": {"name": "Neal Stephenson"}, "model": "fixtures_regress.person"},
{"pk": 1, "model": "fixtures_regress.book",
"fields": {"stores": [["Amazon"], ["Borders"]],
"name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]
""",
)
def test_dependency_sorting(self):
"""
It doesn't matter what order you mention the models: Store *must* be
serialized before Person, and both must be serialized before Book.
"""
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Book, Person, Store])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_2(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Book, Store, Person])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_3(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Store, Book, Person])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_4(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Store, Person, Book])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_5(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Book, Store])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_6(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Store, Book])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_dangling(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle1, Store, Book])]
)
self.assertEqual(sorted_deps, [Circle1, Store, Person, Book])
def test_dependency_sorting_tight_circular(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle2, Circle1, Store, Book])]
)
def test_dependency_sorting_tight_circular_2(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Circle1, Book, Circle2])]
)
def test_dependency_self_referential(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle3 in "
"serialized app list.",
):
serializers.sort_dependencies([("fixtures_regress", [Book, Circle3])])
def test_dependency_sorting_long(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized "
"app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle2, Circle1, Circle3, Store, Book])]
)
def test_dependency_sorting_normal(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, ExternalDependency, Book])]
)
self.assertEqual(sorted_deps, [Person, Book, ExternalDependency])
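# The orderings asserted in the sorting tests above are driven by
# natural_key.dependencies declared on the models. A minimal sketch of
# the pattern (names illustrative, not the app's actual definitions):
#
#   class Book(models.Model):
#       name = models.CharField(max_length=255)
#
#       def natural_key(self):
#           return (self.name,)
#
#       # Serialize Store and Person before any Book:
#       natural_key.dependencies = [
#           "fixtures_regress.store",
#           "fixtures_regress.person",
#       ]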
def test_normal_pk(self):
"""
Normal primary keys work on a model with natural key capabilities.
"""
management.call_command(
"loaddata",
"non_natural_1.json",
verbosity=0,
)
management.call_command(
"loaddata",
"forward_ref_lookup.json",
verbosity=0,
)
management.call_command(
"loaddata",
"non_natural_2.xml",
verbosity=0,
)
books = Book.objects.all()
self.assertQuerysetEqual(
books,
[
"<Book: Cryptonomicon by Neal Stephenson (available at Amazon, "
"Borders)>",
"<Book: Ender's Game by Orson Scott Card (available at Collins "
"Bookstore)>",
"<Book: Permutation City by Greg Egan (available at Angus and "
"Robertson)>",
],
transform=repr,
)
class NaturalKeyFixtureOnOtherDatabaseTests(TestCase):
databases = {"other"}
def test_natural_key_dependencies(self):
"""
Natural keys with foreign keys in dependencies work in a multiple
database setup.
"""
management.call_command(
"loaddata",
"nk_with_foreign_key.json",
database="other",
verbosity=0,
)
obj = NaturalKeyWithFKDependency.objects.using("other").get()
self.assertEqual(obj.name, "The Lord of the Rings")
self.assertEqual(obj.author.name, "J.R.R. Tolkien")
class M2MNaturalKeyFixtureTests(TestCase):
"""Tests for ticket #14426."""
def test_dependency_sorting_m2m_simple(self):
"""
M2M relations without explicit through models SHOULD count as
dependencies.
Regression test for bugs that could be caused by flawed fixes to
#14426, namely if M2M checks are removed from sort_dependencies
altogether.
"""
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [M2MSimpleA, M2MSimpleB])]
)
self.assertEqual(sorted_deps, [M2MSimpleB, M2MSimpleA])
def test_dependency_sorting_m2m_simple_circular(self):
"""
Resolving circular M2M relations without explicit through models should
fail loudly
"""
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.M2MSimpleCircularA, "
"fixtures_regress.M2MSimpleCircularB in serialized app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [M2MSimpleCircularA, M2MSimpleCircularB])]
)
def test_dependency_sorting_m2m_complex(self):
"""
M2M relations with explicit through models should NOT count as
dependencies. The through model itself will have dependencies, though.
"""
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [M2MComplexA, M2MComplexB, M2MThroughAB])]
)
# Order between M2MComplexA and M2MComplexB doesn't matter. The through
# model has dependencies on both of them, though, so it must come last.
self.assertEqual(sorted_deps[-1], M2MThroughAB)
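# Sketch of the "explicit through" shape exercised here (field names are
# illustrative assumptions):
#
#   class M2MComplexA(models.Model):
#       b_set = models.ManyToManyField("M2MComplexB", through="M2MThroughAB")
#
# Only M2MThroughAB's foreign keys create hard ordering constraints,
# which is why the test pins just the last position.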
def test_dependency_sorting_m2m_complex_circular_1(self):
"""
Circular M2M relations with explicit through models should be serializable
"""
A, B, C, AtoB, BtoC, CtoA = (
M2MComplexCircular1A,
M2MComplexCircular1B,
M2MComplexCircular1C,
M2MCircular1ThroughAB,
M2MCircular1ThroughBC,
M2MCircular1ThroughCA,
)
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [A, B, C, AtoB, BtoC, CtoA])]
)
# The dependency sorting should not result in an error, and the
# through model should have dependencies to the other models and as
# such come last in the list.
self.assertEqual(sorted_deps[:3], [A, B, C])
self.assertEqual(sorted_deps[3:], [AtoB, BtoC, CtoA])
def test_dependency_sorting_m2m_complex_circular_2(self):
"""
Circular M2M relations with explicit through models should be
serializable. This test exercises the circularity with explicit
natural_key.dependencies.
"""
sorted_deps = serializers.sort_dependencies(
[
(
"fixtures_regress",
[M2MComplexCircular2A, M2MComplexCircular2B, M2MCircular2ThroughAB],
)
]
)
self.assertEqual(sorted_deps[:2], [M2MComplexCircular2A, M2MComplexCircular2B])
self.assertEqual(sorted_deps[2:], [M2MCircular2ThroughAB])
def test_dump_and_load_m2m_simple(self):
"""
Test serializing models with simple M2M relations and deserializing
them back.
"""
a = M2MSimpleA.objects.create(data="a")
b1 = M2MSimpleB.objects.create(data="b1")
b2 = M2MSimpleB.objects.create(data="b2")
a.b_set.add(b1)
a.b_set.add(b2)
out = StringIO()
management.call_command(
"dumpdata",
"fixtures_regress.M2MSimpleA",
"fixtures_regress.M2MSimpleB",
use_natural_foreign_keys=True,
stdout=out,
)
for model in [M2MSimpleA, M2MSimpleB]:
model.objects.all().delete()
objects = serializers.deserialize("json", out.getvalue())
for obj in objects:
obj.save()
new_a = M2MSimpleA.objects.get_by_natural_key("a")
self.assertCountEqual(new_a.b_set.all(), [b1, b2])
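# get_by_natural_key() above comes from the model's manager; a minimal
# sketch of that pattern (illustrative, not the app's actual manager):
#
#   class M2MSimpleAManager(models.Manager):
#       def get_by_natural_key(self, data):
#           return self.get(data=data)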
class TestTicket11101(TransactionTestCase):
available_apps = ["fixtures_regress"]
@skipUnlessDBFeature("supports_transactions")
def test_ticket_11101(self):
"""Fixtures can be rolled back (ticket #11101)."""
with transaction.atomic():
management.call_command(
"loaddata",
"thingy.json",
verbosity=0,
)
self.assertEqual(Thingy.objects.count(), 1)
transaction.set_rollback(True)
self.assertEqual(Thingy.objects.count(), 0)
class TestLoadFixtureFromOtherAppDirectory(TestCase):
"""
#23612 -- fixtures path should be normalized to allow referencing relative
paths on Windows.
"""
current_dir = os.path.abspath(os.path.dirname(__file__))
# relative_prefix is something like tests/fixtures_regress or
# fixtures_regress depending on how runtests.py is invoked.
# All path separators must be / in order to be a proper regression test on
# Windows, so replace as appropriate.
relative_prefix = os.path.relpath(current_dir, os.getcwd()).replace("\\", "/")
fixtures = [relative_prefix + "/fixtures/absolute.json"]
def test_fixtures_loaded(self):
count = Absolute.objects.count()
self.assertGreater(count, 0, "Fixtures not loaded properly.")
|
b42138b11e55546d59da16cd957eb3ac260477be62096d0f74617e87090d1754 | from django.db.models import Subquery, TextField
from django.db.models.functions import Coalesce, Lower
from django.test import TestCase
from django.utils import timezone
from ..models import Article, Author
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class CoalesceTests(TestCase):
def test_basic(self):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(display_name=Coalesce("alias", "name"))
self.assertQuerysetEqual(
authors.order_by("name"), ["smithj", "Rhonda"], lambda a: a.display_name
)
def test_gt_two_expressions(self):
with self.assertRaisesMessage(
ValueError, "Coalesce must take at least two expressions"
):
Author.objects.annotate(display_name=Coalesce("alias"))
def test_mixed_values(self):
a1 = Author.objects.create(name="John Smith", alias="smithj")
a2 = Author.objects.create(name="Rhonda")
ar1 = Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
ar1.authors.add(a1)
ar1.authors.add(a2)
# mixed Text and Char
article = Article.objects.annotate(
headline=Coalesce("summary", "text", output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by("title"), [lorem_ipsum], lambda a: a.headline
)
# mixed Text and Char wrapped
article = Article.objects.annotate(
headline=Coalesce(
Lower("summary"), Lower("text"), output_field=TextField()
),
)
self.assertQuerysetEqual(
article.order_by("title"), [lorem_ipsum.lower()], lambda a: a.headline
)
def test_ordering(self):
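# In SQL terms, ordering by Coalesce("alias", "name") compiles to
# roughly ORDER BY COALESCE("alias", "name") [ASC|DESC], so rows with a
# NULL alias fall back to the name for sorting.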
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.order_by(Coalesce("alias", "name"))
self.assertQuerysetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
authors = Author.objects.order_by(Coalesce("alias", "name").asc())
self.assertQuerysetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
authors = Author.objects.order_by(Coalesce("alias", "name").desc())
self.assertQuerysetEqual(authors, ["John Smith", "Rhonda"], lambda a: a.name)
def test_empty_queryset(self):
Author.objects.create(name="John Smith")
queryset = Author.objects.values("id")
tests = [
(queryset.none(), "QuerySet.none()"),
(queryset.filter(id=0), "QuerySet.filter(id=0)"),
(Subquery(queryset.none()), "Subquery(QuerySet.none())"),
(Subquery(queryset.filter(id=0)), "Subquery(QuerySet.filter(id=0))"),
]
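# Each annotation below should cost exactly one query: the empty
# queryset is inlined as a subquery, so the COALESCE fallback to 42
# happens inside the same SELECT.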
for empty_query, description in tests:
with self.subTest(description), self.assertNumQueries(1):
qs = Author.objects.annotate(annotation=Coalesce(empty_query, 42))
self.assertEqual(qs.first().annotation, 42)
|
72d3be365cb89bd6076822005e416be1e9e4acaffe79bc45b573426a9fd6d78c | import datetime
import decimal
import enum
import functools
import math
import os
import pathlib
import re
import sys
import uuid
from unittest import mock
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import SettingsReference, settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.serializer import BaseSerializer
from django.db.migrations.writer import MigrationWriter, OperationWriter
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import get_default_timezone, get_fixed_timezone
from django.utils.translation import gettext_lazy as _
from .models import FoodManager, FoodQuerySet
class DeconstructibleInstances:
def deconstruct(self):
return ("DeconstructibleInstances", [], {})
class Money(decimal.Decimal):
def deconstruct(self):
return (
"%s.%s" % (self.__class__.__module__, self.__class__.__name__),
[str(self)],
{},
)
class TestModel1:
def upload_to(self):
return "/somewhere/dynamic/"
thing = models.FileField(upload_to=upload_to)
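# TestModel1 is deliberately defined at module scope: the migration
# serializer can locate its upload_to method by name (see
# test_serialize_unbound_method_reference), unlike the local TestModel2
# created inside a test further down.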
class TextEnum(enum.Enum):
A = "a-value"
B = "value-b"
class TextTranslatedEnum(enum.Enum):
A = _("a-value")
B = _("value-b")
class BinaryEnum(enum.Enum):
A = b"a-value"
B = b"value-b"
class IntEnum(enum.IntEnum):
A = 1
B = 2
class IntFlagEnum(enum.IntFlag):
A = 1
B = 2
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.TestOperation(\n),",
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1=1,\n"
" arg2=2,\n"
"),",
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.KwargsOperation(\n"
" kwarg1=1,\n"
"),",
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(
1, 2, kwarg2=4
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsKwargsOperation(\n"
" arg1=1,\n"
" arg2=2,\n"
" kwarg2=4,\n"
"),",
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4),
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1=custom_migration_operations.operations.ArgsOperation(\n"
" arg1=1,\n"
" arg2=2,\n"
" ),\n"
" arg2=custom_migration_operations.operations.KwargsOperation(\n"
" kwarg1=3,\n"
" kwarg2=4,\n"
" ),\n"
"),",
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
"test\n arg1", "test\narg2"
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),",
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ExpandArgsOperation(\n"
" arg=[\n"
" 1,\n"
" 2,\n"
" ],\n"
"),",
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import custom_migration_operations.operations"})
self.assertEqual(
buff,
"custom_migration_operations.operations.ExpandArgsOperation(\n"
" arg=[\n"
" custom_migration_operations.operations.KwargsOperation(\n"
" kwarg1=1,\n"
" kwarg2=2,\n"
" ),\n"
" ],\n"
"),",
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
class NestedEnum(enum.IntEnum):
A = 1
B = 2
class NestedChoices(models.TextChoices):
X = "X", "X value"
Y = "Y", "Y value"
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
if value:
self.fail(
"Could not exec %r (from value %r): %s" % (string.strip(), value, e)
)
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec(
"%s\ntest_value_result = %s" % ("\n".join(imports), string), value
)["test_value_result"]
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal("1.3"))
self.assertSerializedResultEqual(
decimal.Decimal("1.3"), ("Decimal('1.3')", {"from decimal import Decimal"})
)
self.assertSerializedEqual(Money("1.3"))
self.assertSerializedResultEqual(
Money("1.3"),
("migrations.test_writer.Money('1.3')", {"import migrations.test_writer"}),
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_("Hello"))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set()),
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r"^foo$")
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum['A']", {"import migrations.test_writer"}),
)
self.assertSerializedResultEqual(
TextTranslatedEnum.A,
(
"migrations.test_writer.TextTranslatedEnum['A']",
{"import migrations.test_writer"},
),
)
self.assertSerializedResultEqual(
BinaryEnum.A,
(
"migrations.test_writer.BinaryEnum['A']",
{"import migrations.test_writer"},
),
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum['B']", {"import migrations.test_writer"}),
)
self.assertSerializedResultEqual(
self.NestedEnum.A,
(
"migrations.test_writer.WriterTests.NestedEnum['A']",
{"import migrations.test_writer"},
),
)
self.assertSerializedEqual(self.NestedEnum.A)
field = models.CharField(
default=TextEnum.B, choices=[(m.value, m) for m in TextEnum]
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum['A']), "
"('value-b', migrations.test_writer.TextEnum['B'])], "
"default=migrations.test_writer.TextEnum['B'])",
)
field = models.CharField(
default=TextTranslatedEnum.A,
choices=[(m.value, m) for m in TextTranslatedEnum],
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextTranslatedEnum['A']), "
"('value-b', migrations.test_writer.TextTranslatedEnum['B'])], "
"default=migrations.test_writer.TextTranslatedEnum['A'])",
)
field = models.CharField(
default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum]
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum['A']), "
"(b'value-b', migrations.test_writer.BinaryEnum['B'])], "
"default=migrations.test_writer.BinaryEnum['B'])",
)
field = models.IntegerField(
default=IntEnum.A, choices=[(m.value, m) for m in IntEnum]
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum['A']), "
"(2, migrations.test_writer.IntEnum['B'])], "
"default=migrations.test_writer.IntEnum['A'])",
)
def test_serialize_enum_flags(self):
self.assertSerializedResultEqual(
IntFlagEnum.A,
(
"migrations.test_writer.IntFlagEnum['A']",
{"import migrations.test_writer"},
),
)
self.assertSerializedResultEqual(
IntFlagEnum.B,
(
"migrations.test_writer.IntFlagEnum['B']",
{"import migrations.test_writer"},
),
)
field = models.IntegerField(
default=IntFlagEnum.A, choices=[(m.value, m) for m in IntFlagEnum]
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntFlagEnum['A']), "
"(2, migrations.test_writer.IntFlagEnum['B'])], "
"default=migrations.test_writer.IntFlagEnum['A'])",
)
def test_serialize_choices(self):
class TextChoices(models.TextChoices):
A = "A", "A value"
B = "B", "B value"
class IntegerChoices(models.IntegerChoices):
A = 1, "One"
B = 2, "Two"
class DateChoices(datetime.date, models.Choices):
DATE_1 = 1969, 7, 20, "First date"
DATE_2 = 1969, 11, 19, "Second date"
self.assertSerializedResultEqual(TextChoices.A, ("'A'", set()))
self.assertSerializedResultEqual(IntegerChoices.A, ("1", set()))
self.assertSerializedResultEqual(
DateChoices.DATE_1,
("datetime.date(1969, 7, 20)", {"import datetime"}),
)
field = models.CharField(default=TextChoices.B, choices=TextChoices.choices)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=[('A', 'A value'), ('B', 'B value')], "
"default='B')",
)
field = models.IntegerField(
default=IntegerChoices.B, choices=IntegerChoices.choices
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=[(1, 'One'), (2, 'Two')], default=2)",
)
field = models.DateField(
default=DateChoices.DATE_2, choices=DateChoices.choices
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.DateField(choices=["
"(datetime.date(1969, 7, 20), 'First date'), "
"(datetime.date(1969, 11, 19), 'Second date')], "
"default=datetime.date(1969, 11, 19))",
)
def test_serialize_nested_class(self):
for nested_cls in [self.NestedEnum, self.NestedChoices]:
cls_name = nested_cls.__name__
with self.subTest(cls_name):
self.assertSerializedResultEqual(
nested_cls,
(
"migrations.test_writer.WriterTests.%s" % cls_name,
{"import migrations.test_writer"},
),
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID("5c859437-d061-4847-b3f7-e6b78852f8c8")
uuid_b = uuid.UUID("c7853ec1-2ea3-4359-b02d-b54e8f1bcee2")
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {"import uuid"}),
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {"import uuid"}),
)
field = models.UUIDField(
choices=((uuid_a, "UUID A"), (uuid_b, "UUID B")), default=uuid_a
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))",
)
def test_serialize_pathlib(self):
# Pure path objects work on all platforms.
self.assertSerializedEqual(pathlib.PurePosixPath())
self.assertSerializedEqual(pathlib.PureWindowsPath())
path = pathlib.PurePosixPath("/path/file.txt")
expected = ("pathlib.PurePosixPath('/path/file.txt')", {"import pathlib"})
self.assertSerializedResultEqual(path, expected)
path = pathlib.PureWindowsPath("A:\\File.txt")
expected = ("pathlib.PureWindowsPath('A:/File.txt')", {"import pathlib"})
self.assertSerializedResultEqual(path, expected)
# Concrete path objects work on supported platforms.
if sys.platform == "win32":
self.assertSerializedEqual(pathlib.WindowsPath.cwd())
path = pathlib.WindowsPath("A:\\File.txt")
expected = ("pathlib.PureWindowsPath('A:/File.txt')", {"import pathlib"})
self.assertSerializedResultEqual(path, expected)
else:
self.assertSerializedEqual(pathlib.PosixPath.cwd())
path = pathlib.PosixPath("/path/file.txt")
expected = ("pathlib.PurePosixPath('/path/file.txt')", {"import pathlib"})
self.assertSerializedResultEqual(path, expected)
field = models.FilePathField(path=pathlib.PurePosixPath("/home/user"))
string, imports = MigrationWriter.serialize(field)
self.assertEqual(
string,
"models.FilePathField(path=pathlib.PurePosixPath('/home/user'))",
)
self.assertIn("import pathlib", imports)
def test_serialize_path_like(self):
with os.scandir(os.path.dirname(__file__)) as entries:
path_like = list(entries)[0]
expected = (repr(path_like.path), {})
self.assertSerializedResultEqual(path_like, expected)
field = models.FilePathField(path=path_like)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(string, "models.FilePathField(path=%r)" % path_like.path)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, "Cannot serialize function: lambda"):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, "models.SET(42)")
self.serialize_round_trip(models.SET(42))
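# models.SET(42) is serializable, unlike the lambda above, because SET()
# attaches a deconstruct() hook recording the wrapped value to the
# callable it returns.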
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.now())
self.assertSerializedEqual(datetime.datetime.now)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(
datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone())
)
self.assertSerializedEqual(
datetime.datetime(2013, 12, 31, 22, 1, tzinfo=get_fixed_timezone(180))
)
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {"import datetime"}),
)
with ignore_warnings(category=RemovedInDjango50Warning):
from django.utils.timezone import utc
for tzinfo in (utc, datetime.timezone.utc):
with self.subTest(tzinfo=tzinfo):
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=tzinfo),
(
"datetime.datetime"
"(2012, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)",
{"import datetime"},
),
)
self.assertSerializedResultEqual(
datetime.datetime(
2012, 1, 1, 2, 1, tzinfo=zoneinfo.ZoneInfo("Europe/Paris")
),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)",
{"import datetime"},
),
)
if pytz:
self.assertSerializedResultEqual(
pytz.timezone("Europe/Paris").localize(
datetime.datetime(2012, 1, 1, 2, 1)
),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)",
{"import datetime"},
),
)
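# Note that aware datetimes are normalized to UTC on serialization:
# 02:01 Europe/Paris above is written out as 01:01 UTC, keeping
# migrations independent of the local time zone.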
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"}),
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
(
"models.TextField(blank=True, null=True)",
{"from django.db import models"},
),
)
def test_serialize_settings(self):
self.assertSerializedEqual(
SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL")
)
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"}),
)
def test_serialize_iterators(self):
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)), ("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r"^\w+$")
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string, "django.core.validators.RegexValidator(message='hello')"
)
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r"^\w+$"))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string,
"django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))",
)
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r"^[0-9]+$", flags=re.S)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string,
"django.core.validators.RegexValidator('^[0-9]+$', "
"flags=re.RegexFlag['DOTALL'])",
)
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator("^[-a-zA-Z0-9_]+$", "Invalid", "invalid")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string,
"django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', "
"'invalid')",
)
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string, "django.core.validators.EmailValidator(message='hello')"
)
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(
EmailValidator
)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(
string, "migrations.test_writer.EmailValidator(message='hello')"
)
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(
message="hello"
)
with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(
EmailValidator
)(message="hello")
with self.assertRaisesMessage(
ValueError,
"Could not find object EmailValidator2 in django.core.validators.",
):
MigrationWriter.serialize(validator)
def test_serialize_complex_func_index(self):
index = models.Index(
models.Func("rating", function="ABS"),
models.Case(
models.When(name="special", then=models.Value("X")),
default=models.Value("other"),
),
models.ExpressionWrapper(
models.F("pages"),
output_field=models.IntegerField(),
),
models.OrderBy(models.F("name").desc()),
name="complex_func_index",
)
string, imports = MigrationWriter.serialize(index)
self.assertEqual(
string,
"models.Index(models.Func('rating', function='ABS'), "
"models.Case(models.When(name='special', then=models.Value('X')), "
"default=models.Value('other')), "
"models.ExpressionWrapper("
"models.F('pages'), output_field=models.IntegerField()), "
"models.OrderBy(models.OrderBy(models.F('name'), descending=True)), "
"name='complex_func_index')",
)
self.assertEqual(imports, {"from django.db import models"})
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ("a",)
many_items_tuple = ("a", "b", "c")
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_range(self):
string, imports = MigrationWriter.serialize(range(1, 5))
self.assertEqual(string, "range(1, 5)")
self.assertEqual(imports, set())
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, "range")
self.assertEqual(imports, set())
def test_serialize_unbound_method_reference(self):
"""An unbound method used within a class body can be serialized."""
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""A reference in a local scope can't be serialized."""
class TestModel2:
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaisesMessage(
ValueError, "Could not find function upload_to in migrations.test_writer"
):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
(
"migrations.models.FoodQuerySet.as_manager()",
{"import migrations.models"},
),
)
self.assertSerializedEqual(FoodManager("a", "b"))
self.assertSerializedEqual(FoodManager("x", "y", c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_set(self):
self.assertSerializedEqual(set())
self.assertSerializedResultEqual(set(), ("set()", set()))
self.assertSerializedEqual({"a"})
self.assertSerializedResultEqual({"a"}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_type_none(self):
self.assertSerializedEqual(type(None))
def test_serialize_type_model(self):
self.assertSerializedEqual(models.Model)
# Pass the value itself; serializing the output of serialize() would
# only exercise double-serialization.
self.assertSerializedResultEqual(
models.Model,
("models.Model", {"from django.db import models"}),
)
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
"charfield": models.DateTimeField(default=datetime.datetime.now),
"datetimefield": models.DateTimeField(default=datetime.datetime.now),
}
options = {
"verbose_name": "My model",
"verbose_name_plural": "My models",
}
migration = type(
"Migration",
(migrations.Migration,),
{
"operations": [
migrations.CreateModel(
"MyModel", tuple(fields.items()), options, (models.Model,)
),
migrations.CreateModel(
"MyModel2", tuple(fields.items()), bases=(models.Model,)
),
migrations.CreateModel(
name="MyModel3",
fields=tuple(fields.items()),
options=options,
bases=(models.Model,),
),
migrations.DeleteModel("MyModel"),
migrations.AddField(
"OtherModel", "datetimefield", fields["datetimefield"]
),
],
"dependencies": [("testapp", "some_other_one")],
},
)
writer = MigrationWriter(migration)
output = writer.as_string()
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
def test_migration_path(self):
test_apps = [
"migrations.migrations_test_apps.normal",
"migrations.migrations_test_apps.with_package_model",
"migrations.migrations_test_apps.without_init_file",
]
base_dir = os.path.dirname(os.path.dirname(__file__))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={"append": app}):
migration = migrations.Migration("0001_initial", app.split(".")[-1])
expected_path = os.path.join(
base_dir, *(app.split(".") + ["migrations", "0001_initial.py"])
)
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type(
"Migration",
(migrations.Migration,),
{
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation(),
],
"dependencies": [],
},
)
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result["custom_migration_operations"].operations.TestOperation,
result["custom_migration_operations"].more_operations.TestOperation,
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type(
"Migration",
(migrations.Migration,),
{
"operations": [
migrations.AddField(
"mymodel",
"myfield",
models.DateTimeField(
default=datetime.datetime(
2012, 1, 1, 1, 1, tzinfo=datetime.timezone.utc
),
),
),
]
},
)
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn(
"import datetime\nfrom django.db import migrations, models\n",
output,
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type("Migration", (migrations.Migration,), {"operations": []})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=datetime.timezone.utc)
with mock.patch("django.db.migrations.writer.now", lambda: dt):
for include_header in (True, False):
with self.subTest(include_header=include_header):
writer = MigrationWriter(migration, include_header)
output = writer.as_string()
self.assertEqual(
include_header,
output.startswith(
"# Generated by Django %s on 2015-07-31 04:40\n\n"
% get_version()
),
)
if not include_header:
# Make sure the output starts with something that's not
# a comment or indentation or blank line
self.assertRegex(
output.splitlines(keepends=True)[0], r"^[^#\s]+"
)
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type(
"Migration",
(migrations.Migration,),
{
"operations": [
migrations.AlterModelOptions(
name="model",
options={
"verbose_name": "model",
"verbose_name_plural": "models",
},
),
]
},
)
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
string = MigrationWriter.serialize(
models.CharField(default=DeconstructibleInstances)
)[0]
self.assertEqual(
string,
"models.CharField(default=migrations.test_writer.DeconstructibleInstances)",
)
def test_register_serializer(self):
class ComplexSerializer(BaseSerializer):
def serialize(self):
return "complex(%r)" % self.value, {}
MigrationWriter.register_serializer(complex, ComplexSerializer)
self.assertSerializedEqual(complex(1, 2))
MigrationWriter.unregister_serializer(complex)
with self.assertRaisesMessage(ValueError, "Cannot serialize: (1+2j)"):
self.assertSerializedEqual(complex(1, 2))
def test_register_non_serializer(self):
with self.assertRaisesMessage(
ValueError, "'TestModel1' must inherit from 'BaseSerializer'."
):
MigrationWriter.register_serializer(complex, TestModel1)
|
607cf1f5a95648e1334b3002d6bd58f47e5a63c6ef2b3938b2b3627582b119a9 | import asyncio
import difflib
import inspect
import json
import logging
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy, deepcopy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl,
unquote,
urlencode,
urljoin,
urlparse,
urlsplit,
urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.core.signals import setting_changed
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.http.response import HttpResponseBase
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (
CaptureQueriesContext,
ContextList,
compare_xml,
modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import classproperty
from django.utils.version import PY310
from django.views.static import serve
logger = logging.getLogger("django.test")
__all__ = (
"TestCase",
"TransactionTestCase",
"SimpleTestCase",
"skipIfDBFeature",
"skipUnlessDBFeature",
)
def to_list(value):
"""Put value into a list if it's not already one."""
if not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = "%s\n%s" % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
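# assert_and_parse_html() is the helper shared by the HTML-aware
# assertions (assertHTMLEqual, assertInHTML, assertContains with
# html=True): it parses the markup once and fails the calling test with
# a readable message when the HTML isn't parseable.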
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed,
self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s"
% (
executed,
self.num,
"\n".join(
"%d. %s" % (i, query["sql"])
for i, query in enumerate(self.captured_queries, start=1)
),
),
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name, msg_prefix="", count=None):
self.test_case = test_case
self.template_name = template_name
self.msg_prefix = msg_prefix
self.count = count
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
self.test_case._assert_template_used(
self.template_name,
self.rendered_template_names,
self.msg_prefix,
self.count,
)
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
self.test()
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
self.test_case.assertFalse(
self.template_name in self.rendered_template_names,
f"{self.msg_prefix}Template '{self.template_name}' was used "
f"unexpectedly in rendering the response",
)
class DatabaseOperationForbidden(AssertionError):
pass
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise DatabaseOperationForbidden(self.message)
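# _DatabaseFailure instances are swapped in for connection methods by
# SimpleTestCase._add_databases_failures() below, so any database access
# from a test class that didn't declare the alias raises immediately;
# the original method stays reachable on .wrapped for restoration.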
# RemovedInDjango50Warning
class _AssertFormErrorDeprecationHelper:
@staticmethod
def assertFormError(self, response, form, field, errors, msg_prefix=""):
"""
Search through all the rendered contexts of the `response` for a form named
`form` then dispatch to the new assertFormError() using that instance.
If multiple contexts contain the form, they're all checked in order and any
failure will abort (this matches the old behavior).
"""
warning_msg = (
f"Passing response to assertFormError() is deprecated. Use the form object "
f"directly: assertFormError(response.context[{form!r}], {field!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
self.assertFormError(context[form], field, errors, msg_prefix=msg_prefix)
if not found_form:
self.fail(
f"{full_msg_prefix}The form '{form}' was not used to render the "
f"response"
)
@staticmethod
def assertFormsetError(
self, response, formset, form_index, field, errors, msg_prefix=""
):
"""
Search for a formset named "formset" in the "response" and dispatch to
the new assertFormsetError() using that instance. If the name is found
in multiple contexts they're all checked in order and any failure will
abort the test.
"""
warning_msg = (
f"Passing response to assertFormsetError() is deprecated. Use the formset "
f"object directly: assertFormsetError(response.context[{formset!r}], "
f"{form_index!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
found_formset = False
for i, context in enumerate(contexts):
if formset not in context or not hasattr(context[formset], "forms"):
continue
found_formset = True
self.assertFormsetError(
context[formset], form_index, field, errors, msg_prefix
)
if not found_formset:
self.fail(
f"{full_msg_prefix}The formset '{formset}' was not used to render the "
f"response"
)
@classmethod
def patch_signature(cls, new_method):
"""
Replace the decorated method with a new one that inspects the passed
args/kwargs and dispatch to the old implementation (with deprecation
warning) when it detects the old signature.
"""
@wraps(new_method)
def patched_method(self, *args, **kwargs):
old_method = getattr(cls, new_method.__name__)
old_signature = inspect.signature(old_method)
try:
old_bound_args = old_signature.bind(self, *args, **kwargs)
except TypeError:
# If old signature doesn't match then either:
# 1) new signature will match
# 2) or a TypeError will be raised showing the user information
# about the new signature.
return new_method(self, *args, **kwargs)
new_signature = inspect.signature(new_method)
try:
new_bound_args = new_signature.bind(self, *args, **kwargs)
except TypeError:
# Old signature matches but not the new one (because of
# previous try/except).
return old_method(self, *args, **kwargs)
# If both signatures match, decide on which method to call by
# inspecting the first arg (arg[0] = self).
assert old_bound_args.args[1] == new_bound_args.args[1]
if hasattr(
old_bound_args.args[1], "context"
): # Looks like a response object => old method.
return old_method(self, *args, **kwargs)
elif isinstance(old_bound_args.args[1], HttpResponseBase):
raise ValueError(
f"{old_method.__name__}() is only usable on responses fetched "
f"using the Django test Client."
)
else:
return new_method(self, *args, **kwargs)
return patched_method
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to ensure "
"proper test isolation or add %(alias)r to %(test)s.databases to silence "
"this failure."
)
_disallowed_connection_methods = [
("connect", "connections"),
("temporary_connection", "connections"),
("cursor", "queries"),
("chunked_cursor", "queries"),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
cls.addClassCleanup(cls._cls_overridden_context.disable)
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls.addClassCleanup(cls._cls_modified_context.disable)
cls._add_databases_failures()
cls.addClassCleanup(cls._remove_databases_failures)
@classmethod
def _validate_databases(cls):
if cls.databases == "__all__":
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = (
"%s.%s.databases refers to %r which is not defined in "
"settings.DATABASES."
% (
cls.__module__,
cls.__qualname__,
alias,
)
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += " Did you mean %r?" % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
"test": "%s.%s" % (cls.__module__, cls.__qualname__),
"alias": alias,
"operation": operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = getattr(self.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
)
# Convert async test methods.
if asyncio.iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(
self,
response,
expected_url,
status_code=302,
target_status_code=200,
msg_prefix="",
fetch_redirect_response=True,
):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, "redirect_chain"):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
self.assertEqual(
response.redirect_chain[0][1],
status_code,
msg_prefix
+ (
"Initial response didn't redirect as expected: Response code was "
"%d (expected %d)"
)
% (response.redirect_chain[0][1], status_code),
)
url, status_code = response.redirect_chain[-1]
self.assertEqual(
response.status_code,
target_status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Final Response code was %d "
"(expected %d)"
)
% (response.status_code, target_status_code),
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code,
status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith("/"):
url = urljoin(response.request["PATH_INFO"], url)
path = urljoin(response.request["PATH_INFO"], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use "
"assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == "https"),
**extra,
)
self.assertEqual(
redirect_response.status_code,
target_status_code,
msg_prefix
+ (
"Couldn't retrieve redirection page '%s': response code was %d "
"(expected %d)"
)
% (path, redirect_response.status_code, target_status_code),
)
self.assertURLEqual(
url,
expected_url,
msg_prefix
+ "Response redirected to '%s', expected '%s'" % (url, expected_url),
)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(scheme, netloc, path, params, urlencode(query_parts), fragment)
)
self.assertEqual(
normalize(url1),
normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2),
)
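    # Illustrative sketch (not part of Django source): per the docstring
    # above, single-valued query parameters compare order-insensitively,
    # while repeated parameters keep their relative order.
    #
    #   self.assertURLEqual("/path/?x=1&y=2", "/path/?y=2&x=1")  # passes
    #   self.assertURLEqual("/path/?a=1&a=2", "/path/?a=2&a=1")  # fails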
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (
hasattr(response, "render")
and callable(response.render)
and not response.is_rendered
):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code,
status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code),
)
if response.streaming:
content = b"".join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(
self, content, None, "Response's content is not valid HTML:"
)
text = assert_and_parse_html(
self, text, None, "Second argument is not valid HTML:"
)
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(
self, response, text, count=None, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of %s in response (expected %d)"
% (real_count, text_repr, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr
)
def assertNotContains(
self, response, text, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
self.assertEqual(
real_count, 0, msg_prefix + "Response should not contain %s" % text_repr
)
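    # Illustrative sketch (not part of Django source): typical use of the
    # two content assertions above; the URL and text are hypothetical.
    #
    #   response = self.client.get("/greeting/")
    #   self.assertContains(response, "Hello", count=1)
    #   self.assertNotContains(response, "Goodbye")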
def _check_test_client_response(self, response, attribute, method_name):
"""
Raise a ValueError if the given response doesn't have the required
attribute.
"""
if not hasattr(response, attribute):
raise ValueError(
f"{method_name}() is only usable on responses fetched using "
"the Django test Client."
)
def _assert_form_error(self, form, field, errors, msg_prefix, form_repr):
if not form.is_bound:
self.fail(
f"{msg_prefix}The {form_repr} is not bound, it will never have any "
f"errors."
)
if field is not None and field not in form.fields:
self.fail(
f"{msg_prefix}The {form_repr} does not contain the field {field!r}."
)
if field is None:
field_errors = form.non_field_errors()
failure_message = f"The non-field errors of {form_repr} don't match."
else:
field_errors = form.errors.get(field, [])
failure_message = (
f"The errors of field {field!r} on {form_repr} don't match."
)
self.assertEqual(field_errors, errors, msg_prefix + failure_message)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormError(self, form, field, errors, msg_prefix=""):
"""
Assert that a field named "field" on the given form object has specific
errors.
        errors can be either a single error message or a list of error
        messages. Use errors=[] to test that the field has no errors.
You can pass field=None to check the form's non-field errors.
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
self._assert_form_error(form, field, errors, msg_prefix, f"form {form!r}")
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormsetError(self, formset, form_index, field, errors, msg_prefix=""):
"""
Similar to assertFormError() but for formsets.
Use form_index=None to check the formset's non-form errors (in that
case, you must also use field=None).
Otherwise use an integer to check the formset's n-th form for errors.
Other parameters are the same as assertFormError().
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormsetError() is deprecated, "
"use errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if form_index is None and field is not None:
raise ValueError("You must use field=None with form_index=None.")
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
if not formset.is_bound:
self.fail(
f"{msg_prefix}The formset {formset!r} is not bound, it will never have "
f"any errors."
)
if form_index is not None and form_index >= formset.total_form_count():
form_count = formset.total_form_count()
form_or_forms = "forms" if form_count > 1 else "form"
self.fail(
f"{msg_prefix}The formset {formset!r} only has {form_count} "
f"{form_or_forms}."
)
if form_index is not None:
form_repr = f"form {form_index} of formset {formset!r}"
self._assert_form_error(
formset.forms[form_index], field, errors, msg_prefix, form_repr
)
else:
failure_message = f"The non-form errors of formset {formset!r} don't match."
self.assertEqual(
formset.non_form_errors(), errors, msg_prefix + failure_message
)
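    # Illustrative sketch (not part of Django source): the non-deprecated
    # calling convention passes the form/formset object itself, typically
    # pulled from response.context; the view, context keys, and error
    # messages here are hypothetical.
    #
    #   response = self.client.post("/signup/", {"email": "not-an-email"})
    #   self.assertFormError(
    #       response.context["form"], "email", ["Enter a valid email address."]
    #   )
    #   self.assertFormsetError(
    #       response.context["formset"], 0, "email", ["This field is required."]
    #   )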
def _get_template_used(self, response, template_name, msg_prefix, method_name):
if response is None and template_name is None:
raise TypeError("response and/or template_name argument must be provided")
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None:
self._check_test_client_response(response, "templates", method_name)
if not hasattr(response, "templates") or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def _assert_template_used(self, template_name, template_names, msg_prefix, count):
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ", ".join(template_names)),
)
if count is not None:
self.assertEqual(
template_names.count(template_name),
count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name)),
)
def assertTemplateUsed(
self, response=None, template_name=None, msg_prefix="", count=None
):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateUsed",
)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(
self, context_mgr_template, msg_prefix, count
)
self._assert_template_used(template_name, template_names, msg_prefix, count)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=""):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateNotUsed",
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template, msg_prefix)
self.assertFalse(
template_name in template_names,
msg_prefix
+ "Template '%s' was used unexpectedly in rendering the response"
% template_name,
)
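    # Illustrative sketch (not part of Django source): assertTemplateUsed()
    # as a context manager around direct rendering; the template name is
    # hypothetical and render_to_string comes from django.template.loader.
    #
    #   with self.assertTemplateUsed("base.html"):
    #       render_to_string("base.html")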
@contextmanager
def _assert_raises_or_warns_cm(
self, func, cm_attr, expected_exception, expected_message
):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(
self, func, cm_attr, expected_exception, expected_message, *args, **kwargs
):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(
func, cm_attr, expected_exception, expected_message
)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(
self, expected_exception, expected_message, *args, **kwargs
):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises,
"exception",
expected_exception,
expected_message,
*args,
**kwargs,
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns,
"warning",
expected_warning,
expected_message,
*args,
**kwargs,
)
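    # Illustrative sketch (not part of Django source): both the
    # context-manager and the callable forms are supported by the helpers
    # above.
    #
    #   with self.assertRaisesMessage(ValueError, "invalid literal"):
    #       int("abc")
    #   self.assertRaisesMessage(ValueError, "invalid literal", int, "abc")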
# A similar method is available in Python 3.10+.
if not PY310:
@contextmanager
def assertNoLogs(self, logger, level=None):
"""
Assert no messages are logged on the logger, with at least the
given level.
"""
if isinstance(level, int):
level = logging.getLevelName(level)
elif level is None:
level = "INFO"
try:
with self.assertLogs(logger, level) as cm:
yield
except AssertionError as e:
msg = e.args[0]
expected_msg = (
f"no logs of level {level} or higher triggered on {logger}"
)
if msg != expected_msg:
raise e
else:
self.fail(f"Unexpected logs found: {cm.output!r}")
def assertFieldOutput(
self,
fieldclass,
valid,
invalid,
field_args=None,
field_kwargs=None,
empty_value="",
):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, "required": False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages["required"]]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({"min_length": 2, "max_length": 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
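    # Illustrative sketch (not part of Django source): exercising a form
    # field with assertFieldOutput(); EmailField comes from django.forms and
    # the error message assumes Django's default.
    #
    #   self.assertFieldOutput(
    #       EmailField,
    #       valid={"a@a.com": "a@a.com"},
    #       invalid={"aaa": ["Enter a valid email address."]},
    #   )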
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 != dom2:
standardMsg = "%s != %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
diff = "\n" + "\n".join(
difflib.ndiff(
str(dom1).splitlines(),
str(dom2).splitlines(),
)
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 == dom2:
standardMsg = "%s == %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=""):
needle = assert_and_parse_html(
self, needle, None, "First argument is not valid HTML:"
)
haystack = assert_and_parse_html(
self, haystack, None, "Second argument is not valid HTML:"
)
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of '%s' in response (expected %d)"
% (real_count, needle, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle
)
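    # Illustrative sketch (not part of Django source): HTML comparison
    # ignores attribute order and most whitespace.
    #
    #   self.assertHTMLEqual('<p id="x" class="y">hi</p>',
    #                        '<p class="y" id="x">hi</p>')
    #   self.assertInHTML("<b>hi</b>", "<div><b>hi</b></div>")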
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = "%s != %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
diff = "\n" + "\n".join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = "%s == %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
self.fail(self._formatMessage(msg, standardMsg))
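# Illustrative sketch (not part of Django source): inside a SimpleTestCase
# subclass, the JSON/XML helpers compare parsed structures, not raw strings.
#
#   self.assertJSONEqual('{"a": 1, "b": 2}', {"b": 2, "a": 1})
#   self.assertXMLEqual("<a><b/></a>", "<a ><b /></a>")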
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in this test. "
"Add %(alias)r to %(test)s.databases to ensure proper test isolation "
"and silence this failure."
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias
for alias in connections
if alias in cls.databases
and (
include_mirrors
or not connections[alias].settings_dict["TEST"]["MIRROR"]
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list()
)
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(
connections[db_name], "_test_serialized_contents"
):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command(
"loaddata", *self.fixtures, **{"verbosity": 0, "database": db_name}
)
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all(initialized_only=True):
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None
or ( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback
and hasattr(connections[db_name], "_test_serialized_contents")
)
)
call_command(
"flush",
verbosity=0,
interactive=False,
database=db_name,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate,
)
def assertQuerysetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
items = qs
if transform is not None:
items = map(transform, items)
if not ordered:
return self.assertDictEqual(Counter(items), Counter(values), msg=msg)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, "ordered") and not qs.ordered:
raise ValueError(
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
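# Illustrative sketch (not part of Django source): counting queries and
# comparing querysets inside a test method; "Author" is a hypothetical model.
#
#   with self.assertNumQueries(2):
#       Author.objects.create(name="a")
#       Author.objects.create(name="b")
#   self.assertQuerysetEqual(
#       Author.objects.order_by("name"), ["a", "b"], transform=lambda a: a.name
#   )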
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = (
connections.all()
if aliases is None
else (connections[alias] for alias in aliases)
)
return all(conn.features.supports_transactions for conn in conns)
class TestData:
"""
Descriptor to provide TestCase instance isolation for attributes assigned
during the setUpTestData() phase.
Allow safe alteration of objects assigned in setUpTestData() by test
methods by exposing deep copies instead of the original objects.
Objects are deep copied using a memo kept on the test case instance in
order to maintain their original relationships.
"""
memo_attr = "_testdata_memo"
def __init__(self, name, data):
self.name = name
self.data = data
def get_memo(self, testcase):
try:
memo = getattr(testcase, self.memo_attr)
except AttributeError:
memo = {}
setattr(testcase, self.memo_attr, memo)
return memo
def __get__(self, instance, owner):
if instance is None:
return self.data
memo = self.get_memo(instance)
data = deepcopy(self.data, memo)
setattr(instance, self.name, data)
return data
def __repr__(self):
return "<TestData: name=%r, data=%r>" % (self.name, self.data)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomic = transaction.atomic(using=db_name)
atomic._from_testcase = True
atomic.__enter__()
atomics[db_name] = atomic
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command(
"loaddata",
*cls.fixtures,
**{"verbosity": 0, "database": db_name},
)
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all(initialized_only=True):
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
if self.reset_sequences:
raise TypeError("reset_sequences cannot be used on TestCase instances")
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks
and not connection.needs_rollback
and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
while True:
callback_count = len(connections[using].run_on_commit)
for _, callback, robust in connections[using].run_on_commit[
start_count:
]:
callbacks.append(callback)
if execute:
if robust:
try:
callback()
except Exception as e:
logger.error(
f"Error calling {callback.__qualname__} in "
f"on_commit() (%s).",
e,
exc_info=True,
)
else:
callback()
if callback_count == len(connections[using].run_on_commit):
break
start_count = callback_count
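# Illustrative sketch (not part of Django source): setUpTestData() creates
# class-level data once per class (exposed to tests as deep copies via
# TestData), and captureOnCommitCallbacks() collects transaction.on_commit()
# hooks; "Author" and the view are hypothetical.
#
#   class AuthorTests(TestCase):
#       @classmethod
#       def setUpTestData(cls):
#           cls.author = Author.objects.create(name="a")
#
#       def test_on_commit(self):
#           with self.captureOnCommitCallbacks(execute=True) as callbacks:
#               self.client.post("/authors/", {"name": "b"})
#           self.assertEqual(len(callbacks), 1)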
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, "__unittest_skip__", False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (
isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)
):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (
args
and isinstance(args[0], unittest.TestCase)
and connection.alias not in getattr(args[0], "databases", {})
):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database."
% (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, "databases", None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database."
% (
name,
test_item,
connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get("__unittest_skip__")
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(
getattr(connection.features, feature, False) for feature in features
),
"Database has feature(s) %s" % ", ".join(features),
"skipIfDBFeature",
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support feature(s): %s" % ", ".join(features),
"skipUnlessDBFeature",
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
"skipUnlessAnyDBFeature",
)
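# Illustrative sketch (not part of Django source): the skip decorators defer
# the feature check until the test runs against the actual connection.
#
#   @skipUnlessDBFeature("supports_transactions")
#   def test_rollback(self):
#       ...
#
#   @skipIfDBFeature("interprets_empty_strings_as_nulls")
#   def test_empty_string(self):
#       ...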
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]) :]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace("\\", "/").lstrip("/")
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live HTTP server while the tests are running."""
server_class = ThreadedWSGIServer
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, connections_override=None):
return self.server_class(
(self.host, self.port),
QuietWSGIRequestHandler,
allow_reuse_address=False,
connections_override=connections_override,
)
def terminate(self):
if hasattr(self, "httpd"):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory SQLite)
and each thread needs to commit all their transactions so that the other
thread can see the changes.
"""
host = "localhost"
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def _make_connections_override(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.is_in_memory_db():
connections_override[conn.alias] = conn
return connections_override
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={"append": cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.addClassCleanup(cls._live_server_modified_settings.disable)
cls._start_server_thread()
@classmethod
def _start_server_thread(cls):
connections_override = cls._make_connections_override()
for conn in connections_override.values():
# Explicitly enable thread-shareability for this connection.
conn.inc_thread_sharing()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
cls.addClassCleanup(cls._terminate_thread)
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _terminate_thread(cls):
# Terminate the live server's thread.
cls.server_thread.terminate()
# Restore shared connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
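# Illustrative sketch (not part of Django source): exercising the live
# server with urllib; the path is hypothetical.
#
#   class HomepageTests(LiveServerTestCase):
#       def test_homepage(self):
#           from urllib.request import urlopen
#           with urlopen(self.live_server_url + "/") as response:
#               self.assertEqual(response.status, 200)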
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
def __init_subclass__(cls, /, **kwargs):
super().__init_subclass__(**kwargs)
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__)
)
@classmethod
def setUpClass(cls):
cls._lockfile = open(cls.lockfile)
cls.addClassCleanup(cls._lockfile.close)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
|
5cb65e45f7ff0919da12c26c2b3d5823a1f2012d79b00d5bb5ef9757d84b3ccc | from contextlib import ContextDecorator, contextmanager
from django.db import (
DEFAULT_DB_ALIAS,
DatabaseError,
Error,
ProgrammingError,
connections,
)
class TransactionManagementError(ProgrammingError):
"""Transaction management is used improperly."""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
@contextmanager
def mark_for_rollback_on_error(using=None):
"""
Internal low-level utility to mark a transaction as "needs rollback" when
an exception is raised while not enforcing the enclosed block to be in a
transaction. This is needed by Model.save() and friends to avoid starting a
transaction when in autocommit mode and a single query is executed.
It's equivalent to:
connection = get_connection(using)
if connection.get_autocommit():
yield
else:
with transaction.atomic(using=using, savepoint=False):
yield
but it uses low-level utilities to avoid performance overhead.
"""
try:
yield
except Exception:
connection = get_connection(using)
if connection.in_atomic_block:
connection.needs_rollback = True
raise
def on_commit(func, using=None, robust=False):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func, robust)
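# Illustrative sketch (not part of Django source): registering a callback
# that runs only if the surrounding transaction commits; "order" and
# "send_confirmation" are hypothetical.
#
#   from django.db import transaction
#
#   with transaction.atomic():
#       order.save()
#       transaction.on_commit(send_confirmation)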
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
    A stack of savepoint identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
An atomic block can be tagged as durable. In this case, raise a
RuntimeError if it's nested within another atomic block. This guarantees
that database changes in a durable block are committed to the database when
    the block exits without error.
This is a private API.
"""
def __init__(self, using, savepoint, durable):
self.using = using
self.savepoint = savepoint
self.durable = durable
self._from_testcase = False
def __enter__(self):
connection = get_connection(self.using)
if (
self.durable
and connection.atomic_blocks
and not connection.atomic_blocks[-1]._from_testcase
):
raise RuntimeError(
"A durable atomic block cannot be nested within another "
"atomic block."
)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(
False, force_begin_transaction_with_broken_autocommit=True
)
connection.in_atomic_block = True
if connection.in_atomic_block:
connection.atomic_blocks.append(self)
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.in_atomic_block:
connection.atomic_blocks.pop()
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
                # allowing the rollback to be performed at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True, durable=False):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint, durable)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint, durable)
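# Illustrative sketch (not part of Django source): atomic() as a bare
# decorator and as a context manager; "Author" is a hypothetical model.
#
#   @atomic
#   def create_author(name):
#       return Author.objects.create(name=name)
#
#   with atomic(using="default", durable=True):
#       Author.objects.create(name="a")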
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
|
41591def63ecb3df1dc8d5490b66749a7375c8a2f6515f28479ca682fdd2e7c9 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import functools
import inspect
import logging
from collections import namedtuple
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, DatabaseError
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
logger = logging.getLogger("django.db.models")
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple(
"PathInfo",
"from_opts to_opts target_fields join_field m2m direct filtered_relation",
)
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(
children=[*args, *sorted(kwargs.items())],
connector=_connector,
negated=_negated,
)
def _combine(self, other, conn):
if getattr(other, "conditional", False) is False:
raise TypeError(other)
if not self:
return other.copy()
if not other and isinstance(other, Q):
return self.copy()
obj = self.create(connector=conn)
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
def __invert__(self):
obj = self.copy()
obj.negate()
return obj
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self,
reuse,
allow_joins=allow_joins,
split_subq=False,
check_filterable=False,
)
query.promote_joins(joins)
return clause
def flatten(self):
"""
Recursively yield this Q object and all subexpressions, in depth-first
order.
"""
yield self
for child in self.children:
if isinstance(child, tuple):
# Use the lookup.
child = child[1]
if hasattr(child, "flatten"):
yield from child.flatten()
else:
yield child
def check(self, against, using=DEFAULT_DB_ALIAS):
"""
        Do a database query to check if the expressions of the Q instance
        match against the given expressions.
"""
# Avoid circular imports.
from django.db.models import Value
from django.db.models.sql import Query
from django.db.models.sql.constants import SINGLE
query = Query(None)
for name, value in against.items():
if not hasattr(value, "resolve_expression"):
value = Value(value)
query.add_annotation(value, name, select=False)
query.add_annotation(Value(1), "_check")
# This will raise a FieldError if a field is missing in "against".
query.add_q(self)
compiler = query.get_compiler(using=using)
try:
return compiler.execute_sql(SINGLE) is not None
except DatabaseError as e:
logger.warning("Got a database error calling check() on %r: %s", self, e)
return True
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.query_utils"):
path = path.replace("django.db.models.query_utils", "django.db.models")
args = tuple(self.children)
kwargs = {}
if self.connector != self.default:
kwargs["_connector"] = self.connector
if self.negated:
kwargs["_negated"] = True
return path, args, kwargs
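# --- Illustrative usage sketch (not part of this module). Q objects combine
# with &, |, ^, and ~ through _combine()/negate() above; `Book` is a
# hypothetical model passed in to keep the sketch self-contained.
from django.db.models import Q

def recent_or_popular(Book):
    # (published after 2020 OR rating >= 4.5) AND NOT out of print.
    condition = (Q(published__year__gt=2020) | Q(rating__gte=4.5)) & ~Q(
        out_of_print=True
    )
    return Book.objects.filter(condition)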
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
"""
        Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
field_name = self.field.attname
if field_name not in data:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance)
if val is None:
instance.refresh_from_db(fields=[field_name])
else:
data[field_name] = val
return data[field_name]
def _check_parent_chain(self, instance):
"""
Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be-fetched
        field is a primary key field.
"""
opts = instance._meta
link_field = opts.get_ancestor_link(self.field.model)
if self.field.primary_key and self.field != link_field:
return getattr(instance, link_field.attname)
return None
class class_or_instance_method:
"""
Hook used in RegisterLookupMixin to return partial functions depending on
the caller type (instance or class of models.Field).
"""
def __init__(self, class_method, instance_method):
self.class_method = class_method
self.instance_method = instance_method
def __get__(self, instance, owner):
if instance is None:
return functools.partial(self.class_method, owner)
return functools.partial(self.instance_method, instance)
class RegisterLookupMixin:
def _get_lookup(self, lookup_name):
return self.get_lookups().get(lookup_name, None)
@functools.lru_cache(maxsize=None)
def get_class_lookups(cls):
class_lookups = [
parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls)
]
return cls.merge_dicts(class_lookups)
def get_instance_lookups(self):
class_lookups = self.get_class_lookups()
if instance_lookups := getattr(self, "instance_lookups", None):
return {**class_lookups, **instance_lookups}
return class_lookups
get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups)
get_class_lookups = classmethod(get_class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
        Merge dicts in reverse so that the order of the original list takes
        precedence; e.g., merge_dicts([a, b]) prefers the keys in 'a' over
        those in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_class_lookups(cls):
for subclass in subclasses(cls):
subclass.get_class_lookups.cache_clear()
def register_class_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "class_lookups" not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_class_lookups()
return lookup
def register_instance_lookup(self, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "instance_lookups" not in self.__dict__:
self.instance_lookups = {}
self.instance_lookups[lookup_name] = lookup
return lookup
register_lookup = class_or_instance_method(
register_class_lookup, register_instance_lookup
)
register_class_lookup = classmethod(register_class_lookup)
def _unregister_class_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
cls._clear_cached_class_lookups()
def _unregister_instance_lookup(self, lookup, lookup_name=None):
"""
Remove given lookup from instance lookups. For use in tests only as
it's not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del self.instance_lookups[lookup_name]
_unregister_lookup = class_or_instance_method(
_unregister_class_lookup, _unregister_instance_lookup
)
_unregister_class_lookup = classmethod(_unregister_class_lookup)
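# --- Illustrative usage sketch (not part of this module): registering a
# custom lookup on a field class via the class_or_instance_method hook above.
# `NotEqual` is written for this sketch; the registration API is real.
from django.db.models import CharField
from django.db.models.lookups import Lookup

class NotEqual(Lookup):
    lookup_name = "ne"

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        return "%s <> %s" % (lhs, rhs), [*lhs_params, *rhs_params]

CharField.register_lookup(NotEqual)
# Afterwards any CharField supports e.g. .filter(title__ne="Untitled").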
def select_related_descend(field, restricted, requested, select_mask, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(compiler.get_related_selections()) and the model instance creation code
(compiler.klass_info).
Arguments:
* field - the field to be checked
    * restricted - a boolean indicating whether the field list has been
      manually restricted using a requested clause
* requested - The select_related() dictionary.
* select_mask - the dictionary of selected fields.
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if (
restricted
and select_mask
and field.name in requested
and field not in select_mask
):
raise FieldError(
f"Field {field.model._meta.object_name}.{field.name} cannot be both "
"deferred and traversed using select_related at the same time."
)
return True
def refs_expression(lookup_parts, annotations):
"""
Check if the lookup_parts contains references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
    Check that model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model
or opts.concrete_model in model._meta.get_parent_list()
or model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
    #     place = OneToOneField(Place, primary_key=True)
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return check(target_opts) or (
getattr(field, "primary_key", False) and check(field.model._meta)
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError("relation_name cannot be empty.")
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError("condition argument must be a Q() instance.")
self.condition = condition
self.path = []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.relation_name == other.relation_name
and self.alias == other.alias
and self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError("FilteredRelation.resolve_expression() is unused.")
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
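# --- Illustrative usage sketch (not part of this module). FilteredRelation
# moves the condition into the JOIN's ON clause instead of the WHERE clause,
# so rows without a matching relation survive; `Restaurant` and its `pizzas`
# relation are hypothetical.
from django.db.models import FilteredRelation, Q

def with_vegan_pizzas(Restaurant):
    return Restaurant.objects.annotate(
        vegan_pizzas=FilteredRelation("pizzas", condition=Q(pizzas__vegan=True)),
    ).filter(vegan_pizzas__name__icontains="mushroom")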
|
b4d78f4c5c6bfffc483aae7ca5d1e13feaac23a1421f9a2913147e2da6d6989d | import copy
import datetime
import functools
import inspect
import warnings
from collections import defaultdict
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
    # Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
        Where `sql` is a string containing ordered SQL parameter placeholders
        to be replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression.
As a guess, if the output fields of all source fields match then simply
infer the same type here.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
        # This guess is mostly a bad idea, but there is quite a lot of code
        # (especially 3rd party Func subclasses) that depends on it; we'd need
        # a deprecation path to fix it.
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.copy()
source_expressions = clone.get_source_expressions()
clone.set_source_expressions(
[
expr.replace_expressions(replacements) if expr else None
for expr in source_expressions
]
)
return clone
def copy(self):
return copy.copy(self)
def prefix_references(self, prefix):
clone = self.copy()
clone.set_source_expressions(
[
F(f"{prefix}{expr.name}")
if isinstance(expr, F)
else expr.prefix_references(prefix)
for expr in self.get_source_expressions()
]
)
return clone
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
# Type inference for CombinedExpression.output_field.
# Missing items will result in FieldError, by design.
#
# The current approach for NULL is based on lowest common denominator behavior
# i.e. if one of the supported databases is raising an error (rather than
# return NULL) for `val <op> NULL`, then Django raises FieldError.
NoneType = type(None)
_connector_combinations = [
# Numeric operations - operands of same type.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.FloatField, fields.FloatField, fields.FloatField),
(fields.DecimalField, fields.DecimalField, fields.DecimalField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
# Behavior for DIV with integer arguments follows Postgres/SQLite,
# not MySQL/Oracle.
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
},
# Numeric operations - operands of different type.
{
connector: [
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
)
},
# Bitwise operators.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
]
for connector in (
Combinable.BITAND,
Combinable.BITOR,
Combinable.BITLEFTSHIFT,
Combinable.BITRIGHTSHIFT,
Combinable.BITXOR,
)
},
# Numeric with NULL.
{
connector: [
(field_type, NoneType, field_type),
(NoneType, field_type, field_type),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField)
},
# Date/DateTimeField/DurationField/TimeField.
{
Combinable.ADD: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DurationField, fields.DateField, fields.DateTimeField),
(fields.DurationField, fields.DateTimeField, fields.DateTimeField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.DurationField, fields.TimeField, fields.TimeField),
],
},
{
Combinable.SUB: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DateField, fields.DateField, fields.DurationField),
(fields.DateField, fields.DateTimeField, fields.DurationField),
(fields.DateTimeField, fields.DateField, fields.DurationField),
(fields.DateTimeField, fields.DateTimeField, fields.DurationField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.TimeField, fields.TimeField, fields.DurationField),
],
},
]
_connector_combinators = defaultdict(list)
def register_combinable_fields(lhs, connector, rhs, result):
"""
Register combinable types:
lhs <connector> rhs -> result
e.g.
register_combinable_fields(
IntegerField, Combinable.ADD, FloatField, FloatField
)
"""
_connector_combinators[connector].append((lhs, rhs, result))
for d in _connector_combinations:
for connector, field_types in d.items():
for lhs, rhs, result in field_types:
register_combinable_fields(lhs, connector, rhs, result)
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_expressions(self, replacements):
return replacements.get(self, self)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
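# --- Illustrative usage sketch (not part of this module). F() defers the
# column reference to the database, so arithmetic and comparisons run in SQL;
# `Product` is a hypothetical model.
from django.db.models import F

def discount_and_restock_check(Product):
    # UPDATE ... SET price = price * 0.9, without fetching rows into Python.
    Product.objects.update(price=F("price") * 0.9)
    # Compare two columns of the same row.
    return Product.objects.filter(stock__lt=F("reserved"))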
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
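# --- Illustrative sketch (not part of this module): a minimal Func subclass.
# The class attributes map onto the template handling in Func.as_sql() above;
# `Round2` is invented for this sketch.
from django.db.models import FloatField, Func

class Round2(Func):
    """ROUND(expr, 2) as a reusable expression."""

    function = "ROUND"
    template = "%(function)s(%(expressions)s, 2)"
    output_field = FloatField()

# Usage: Product.objects.annotate(price_rounded=Round2("price"))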
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL.
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
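# --- Illustrative usage sketch (not part of this module). Value() wraps a
# Python literal; _resolve_output_field() above infers the field type when
# output_field isn't given. `Book` is hypothetical.
from django.db.models import Value
from django.db.models.functions import Concat

def labelled_titles(Book):
    # The plain string must be wrapped in Value() to mix with the column.
    return Book.objects.annotate(label=Concat(Value("book: "), "title"))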
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parents fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
    Reference to a column alias of the query. For example, Ref('sum_cost') in
    the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
def get_group_by_cols(self):
group_by_cols = []
for order_by in self.get_source_expressions():
group_by_cols.extend(order_by.get_group_by_cols())
return group_by_cols
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
# Filters that match everything are handled as empty strings in the
# WHERE clause, but in a CASE WHEN expression they must use a predicate
# that's always True.
if condition_sql == "":
if connection.features.supports_boolean_expr_in_select_clause:
condition_sql, condition_params = compiler.compile(Value(True))
else:
condition_sql, condition_params = "1=1", ()
template_params["condition"] = condition_sql
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
template = template or self.template
return template % template_params, (
*sql_params,
*condition_params,
*result_params,
)
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
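# --- Illustrative usage sketch (not part of this module): the searched CASE
# from the docstring above, written with When()/Case(). `Score` and its `n`
# field are hypothetical.
from django.db.models import Case, CharField, Value, When

def with_sign(Score):
    return Score.objects.annotate(
        sign=Case(
            When(n__gt=0, then=Value("positive")),
            When(n__lt=0, then=Value("negative")),
            default=Value("zero"),
            output_field=CharField(),
        )
    )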
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
        # If this expression is referenced by an alias for an explicit GROUP BY
        # through values(), a reference to this expression (and not the
        # underlying .query) must be returned to ensure external column
        # references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def get_group_by_cols(self, alias=None):
# self.query only gets limited to a single row in the .exists() call
# from self.as_sql() so deferring to Query.get_group_by_cols() is
# inappropriate.
if alias is None:
return [self]
return super().get_group_by_cols(alias)
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
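# --- Illustrative usage sketch (not part of this module). OuterRef() is
# resolved to a ResolvedOuterRef when the inner queryset is wrapped in
# Subquery()/Exists(); `Post` and `Comment` are hypothetical models.
from django.db.models import Exists, OuterRef, Subquery

def annotated_posts(Post, Comment):
    comments = Comment.objects.filter(post=OuterRef("pk"))
    return Post.objects.annotate(
        newest_comment=Subquery(
            comments.order_by("-created").values("body")[:1]
        ),
        has_comments=Exists(comments),
    )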
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
if nulls_first is False or nulls_last is False:
# When the deprecation ends, replace with:
# raise ValueError(
# "nulls_first and nulls_last values must be True or None."
# )
warnings.warn(
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first:
self.nulls_last = True
self.nulls_first = None
elif self.nulls_last:
self.nulls_first = True
self.nulls_last = None
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
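# --- Illustrative usage sketch (not part of this module). OrderBy instances
# are normally built via F().asc()/.desc(); nulls_first/nulls_last feed the
# NULLS FIRST/LAST handling in as_sql() above. `Book` is hypothetical.
from django.db.models import F

def latest_first(Book):
    return Book.objects.order_by(F("published").desc(nulls_last=True))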
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], ()
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params += tuple(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params += tuple(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params += tuple(frame_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
(*params, *window_params),
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
group_by_cols = []
if self.partition_by:
group_by_cols.extend(self.partition_by.get_group_by_cols())
if self.order_by is not None:
group_by_cols.extend(self.order_by.get_group_by_cols())
return group_by_cols
class WindowFrame(Expression):
"""
Model the frame clause in window expressions. There are two types of frame
clauses which are subclasses, however, all processing and validation (by no
means intended to be complete) is done here. Thus, providing an end for a
frame is optional (the default is UNBOUNDED FOLLOWING, which is the last
row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
|
d93d716bc9eaa69cb84138a57f202f5f44b621cdbb8f461f54efa333ab131dc0 | """
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from django.core.exceptions import FieldError
from django.db import (
DEFAULT_DB_ALIAS,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import Q, Window, signals
from django.db.models.functions import RowNumber
from django.db.models.lookups import GreaterThan, LessThanOrEqual
from django.db.models.query import QuerySet
from django.db.models.query_utils import DeferredAttribute
from django.db.models.utils import resolve_callables
from django.utils.functional import cached_property
class ForeignKeyDeferredAttribute(DeferredAttribute):
def __set__(self, instance, value):
if instance.__dict__.get(self.field.attname) != value and self.field.is_cached(
instance
):
self.field.delete_cached_value(instance)
instance.__dict__[self.field.attname] = value
def _filter_prefetch_queryset(queryset, field_name, instances):
predicate = Q(**{f"{field_name}__in": instances})
db = queryset._db or DEFAULT_DB_ALIAS
if queryset.query.is_sliced:
if not connections[db].features.supports_over_clause:
raise NotSupportedError(
"Prefetching from a limited queryset is only supported on backends "
"that support window functions."
)
low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark
order_by = [
expr for expr, _ in queryset.query.get_compiler(using=db).get_order_by()
]
window = Window(RowNumber(), partition_by=field_name, order_by=order_by)
predicate &= GreaterThan(window, low_mark)
if high_mark is not None:
predicate &= LessThanOrEqual(window, high_mark)
queryset.query.clear_limits()
return queryset.filter(predicate)
class ForwardManyToOneDescriptor:
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `self.field.model` might
# still be a string model reference.
return type(
"RelatedObjectDoesNotExist",
(self.field.remote_field.model.DoesNotExist, AttributeError),
{
"__module__": self.field.model.__module__,
"__qualname__": "%s.%s.RelatedObjectDoesNotExist"
% (
self.field.model.__qualname__,
self.field.name,
),
},
)
def is_cached(self, instance):
return self.field.is_cached(instance)
def get_queryset(self, **hints):
return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
remote_field = self.field.remote_field
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {
"%s__in"
% related_field.name: {instance_attr(inst)[0] for inst in instances}
}
else:
query = {"%s__in" % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not remote_field.multiple:
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
remote_field.set_cached_value(rel_obj, instance)
return (
queryset,
rel_obj_attr,
instance_attr,
True,
self.field.get_cache_name(),
False,
)
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = None not in self.field.get_local_related_value(instance)
ancestor_link = (
instance._meta.get_ancestor_link(self.field.model)
if has_value
else None
)
if ancestor_link and ancestor_link.is_cached(instance):
# An ancestor link will exist if this field is defined on a
# multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance)
# The value might be cached on an ancestor if the instance
# originated from walking down the inheritance chain.
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if rel_obj is None and has_value:
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not remote_field.multiple:
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(
value, self.field.remote_field.model._meta.concrete_model
):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.'
% (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(
instance.__class__, instance=value
)
if value._state.db is None:
value._state.db = router.db_for_write(
value.__class__, instance=instance
)
if not router.allow_relation(value, instance):
raise ValueError(
'Cannot assign "%r": the current database router prevents this '
"relation." % value
)
remote_field = self.field.remote_field
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = self.field.get_cached_value(instance, default=None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
remote_field.set_cached_value(related, None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.field.set_cached_value(instance, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not remote_field.multiple:
remote_field.set_cached_value(value, instance)
def __reduce__(self):
"""
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
"""
return getattr, (self.field.model, self.field.name)
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
obj = rel_model(**kwargs)
obj._state.adding = instance._state.adding
obj._state.db = instance._state.db
return obj
return super().get_object(instance)
def __set__(self, instance, value):
super().__set__(instance, value)
# If the primary key is a link to a parent model and a parent instance
# is being set, update the value of the inherited pk(s).
if self.field.primary_key and self.field.remote_field.parent_link:
opts = instance._meta
# Inherited primary key fields from this object's base classes.
inherited_pk_fields = [
field
for field in opts.concrete_fields
if field.primary_key and field.remote_field
]
for field in inherited_pk_fields:
rel_model_pk_name = field.remote_field.model._meta.pk.attname
raw_value = (
getattr(value, rel_model_pk_name) if value is not None else None
)
setattr(instance, rel_model_pk_name, raw_value)
class ReverseOneToOneDescriptor:
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
# Following the example above, `related` is an instance of OneToOneRel
# which represents the reverse restaurant field (place.restaurant).
self.related = related
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
"RelatedObjectDoesNotExist",
(self.related.related_model.DoesNotExist, AttributeError),
{
"__module__": self.related.model.__module__,
"__qualname__": "%s.%s.RelatedObjectDoesNotExist"
% (
self.related.model.__qualname__,
self.related.name,
),
},
)
def is_cached(self, instance):
return self.related.is_cached(instance)
def get_queryset(self, **hints):
return self.related.related_model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.related.field.get_local_related_value
instance_attr = self.related.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {"%s__in" % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
self.related.field.set_cached_value(rel_obj, instance)
return (
queryset,
rel_obj_attr,
instance_attr,
True,
self.related.get_cache_name(),
False,
)
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s."
% (instance.__class__.__name__, self.related.get_accessor_name())
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
# Following the example above, this would be the cached
# ``restaurant`` instance (if any).
rel_obj = self.related.get_cached_value(instance, default=None)
if rel_obj is not None:
# Remove the ``restaurant`` instance from the ``place``
# instance cache.
self.related.delete_cached_value(instance)
# Set the ``place`` field on the ``restaurant``
# instance to None.
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.'
% (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(
instance.__class__, instance=value
)
if value._state.db is None:
value._state.db = router.db_for_write(
value.__class__, instance=instance
)
if not router.allow_relation(value, instance):
raise ValueError(
'Cannot assign "%r": the current database router prevents this '
"relation." % value
)
related_pk = tuple(
getattr(instance, field.attname)
for field in self.related.field.foreign_related_fields
)
# Set the value of the related field to the value of the related
# object's related field.
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.related.set_cached_value(instance, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
self.related.field.set_cached_value(value, instance)
def __reduce__(self):
# Same purpose as ForwardManyToOneDescriptor.__reduce__().
return getattr, (self.related.model, self.related.name)
class ReverseManyToOneDescriptor:
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cache_key(self):
# Being able to access the manager instance precludes it from being
# hidden. The rel's accessor name is used to allow multiple managers
# to the same model to coexist. e.g. post.attached_comment_set and
# post.attached_link_set are separately cached.
return self.rel.get_cache_name()
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if key not in instance_cache:
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key]
def _get_set_deprecation_msg_params(self):
return (
"reverse side of a related set",
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
raise TypeError(
"Direct assignment to the %s is prohibited. Use %s.set() instead."
% self._get_set_deprecation_msg_params(),
)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _check_fk_val(self):
for field in self.field.foreign_related_fields:
if getattr(self.instance, field.attname) is None:
raise ValueError(
f'"{self.instance!r}" needs to have a value for field '
f'"{field.attname}" before this relationship can be used.'
)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[
db
].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == "" and empty_strings_as_null):
return queryset.none()
if self.field.many_to_one:
# Guard against field-like objects such as GenericRelation
# that abuse create_reverse_many_to_one_manager() with reverse
# one-to-many relationships instead and break known related
# objects assignment.
try:
target_field = self.field.target_field
except FieldError:
# The relationship has multiple target fields. Use a tuple
# for related object id.
rel_obj_id = tuple(
[
getattr(self.instance, target_field.attname)
for target_field in self.field.path_infos[-1].target_fields
]
)
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {
self.field: {rel_obj_id: self.instance}
}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(
self.field.remote_field.get_cache_name()
)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if self.instance.pk is None:
raise ValueError(
f"{self.instance.__class__.__name__!r} instance needs to have a "
f"primary key value before this relationship can be used."
)
try:
return self.instance._prefetched_objects_cache[
self.field.remote_field.get_cache_name()
]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
queryset = _filter_prefetch_queryset(queryset, self.field.name, instances)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
if not self.field.is_cached(rel_obj):
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name, False
def add(self, *objs, bulk=True):
self._check_fk_val()
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (
self.model._meta.object_name,
obj,
)
)
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.field.name: self.instance,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
self._check_fk_val()
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a
# value of null.
if rel.field.null:
def remove(self, *objs, bulk=True):
if not objs:
return
self._check_fk_val()
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (
self.model._meta.object_name,
obj,
)
)
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._check_fk_val()
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
self._check_fk_val()
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super().__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
@cached_property
def related_manager_cache_key(self):
if self.reverse:
# Symmetrical M2Ms won't have an accessor name, but should never
# end up in the reverse branch anyway, as the related_name ends up
# being hidden, and no public manager is created.
return self.rel.get_cache_name()
else:
# For forward managers, defer to the field name.
return self.field.get_cache_name()
def _get_set_deprecation_msg_params(self):
return (
"%s side of a many-to-many set"
% ("reverse" if self.reverse else "forward"),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
Create a manager for the either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError(
'"%r" needs to have a value for field "%s" before '
"this many-to-many relationship can be used."
% (instance, self.pk_field_names[self.source_field_name])
)
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError(
"%r instance needs to have a primary key value before "
"a many-to-many relationship can be used."
% instance.__class__.__name__
)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(
manager.__class__, rel, reverse
)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q.create([(self.source_field_name, self.related_val)])
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (
not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()
)
if removed_vals_filters:
filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)])
if self.symmetrical:
symmetrical_filters = Q.create(
[(self.target_field_name, self.related_val)]
)
if removed_vals_filters:
symmetrical_filters &= Q.create(
[(f"{self.source_field_name}__in", removed_vals)]
)
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset._defer_next_filter = True
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
queryset = _filter_prefetch_queryset(
queryset._next_is_sticky(), self.query_field_name, instances
)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(
select={
"_prefetch_related_val_%s"
% f.attname: "%s.%s"
% (qn(join_table), qn(f.column))
for f in fk.local_related_fields
}
)
return (
queryset,
lambda result: tuple(
getattr(result, "_prefetch_related_val_%s" % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name,
self.target_field_name,
*objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table.
if self.symmetrical:
self._add_items(
self.target_field_name,
self.source_field_name,
*objs,
through_defaults=through_defaults,
)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through,
action="pre_clear",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=None,
using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through,
action="post_clear",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=None,
using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(
self.using(db).values_list(
self.target_field.target_field.attname, flat=True
)
)
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model)
else self.target_field.get_prep_value(obj)
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(
**kwargs
)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(
ManyRelatedManager, self.db_manager(db)
).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
"""
Return the set of ids of `objs` that the target field references.
"""
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", '
'value is on database "%s"'
% (obj, self.instance._state.db, obj._state.db)
)
target_id = target_field.get_foreign_related_value(obj)[0]
if target_id is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None'
% (obj, target_field_name)
)
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
else:
target_ids.add(target_field.get_prep_value(obj))
return target_ids
def _get_missing_target_ids(
self, source_field_name, target_field_name, db, target_ids
):
"""
Return the subset of ids of `objs` that aren't already assigned to
this relationship.
"""
vals = (
self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(
**{
source_field_name: self.related_val[0],
"%s__in" % target_field_name: target_ids,
}
)
)
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
"""
Return a boolean triple of the way the add should be performed.
The first element is whether or not bulk_create(ignore_conflicts)
can be used, the second whether or not signals must be sent, and
the third element is whether or not the immediate bulk insertion
with conflicts ignored can be performed.
"""
# Conflicts can be ignored when the intermediary model is
# auto-created as the only possible collision is on the
# (source_id, target_id) tuple. The same assertion doesn't hold for
# user-defined intermediary models as they could have other fields
# causing conflicts which must be surfaced.
can_ignore_conflicts = (
self.through._meta.auto_created is not False
and connections[db].features.supports_ignore_conflicts
)
# Don't send the signal when inserting duplicate data row
# for symmetrical reverse entries.
must_send_signals = (
self.reverse or source_field_name == self.source_field_name
) and (signals.m2m_changed.has_listeners(self.through))
# Fast addition through bulk insertion can only be performed
# if no m2m_changed listeners are connected for self.through
# as they require the added set of ids to be provided via
# pk_set.
return (
can_ignore_conflicts,
must_send_signals,
(can_ignore_conflicts and not must_send_signals),
)
def _add_items(
self, source_field_name, target_field_name, *objs, through_defaults=None
):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys
# of object instances.
if not objs:
return
through_defaults = dict(resolve_callables(through_defaults or {}))
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan(
db, source_field_name
)
if can_fast_add:
self.through._default_manager.using(db).bulk_create(
[
self.through(
**{
"%s_id" % source_field_name: self.related_val[0],
"%s_id" % target_field_name: target_id,
}
)
for target_id in target_ids
],
ignore_conflicts=True,
)
return
missing_target_ids = self._get_missing_target_ids(
source_field_name, target_field_name, db, target_ids
)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(
sender=self.through,
action="pre_add",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=missing_target_ids,
using=db,
)
# Add the ones that aren't there already.
self.through._default_manager.using(db).bulk_create(
[
self.through(
**through_defaults,
**{
"%s_id" % source_field_name: self.related_val[0],
"%s_id" % target_field_name: target_id,
},
)
for target_id in missing_target_ids
],
ignore_conflicts=can_ignore_conflicts,
)
if must_send_signals:
signals.m2m_changed.send(
sender=self.through,
action="post_add",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=missing_target_ids,
using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove. Either object instances, or primary
# keys of object instances.
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through,
action="pre_remove",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=old_ids,
using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(
**{"%s__in" % self.target_field.target_field.attname: old_ids}
)
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through,
action="post_remove",
instance=self.instance,
reverse=self.reverse,
model=self.model,
pk_set=old_ids,
using=db,
)
return ManyRelatedManager
|
b59151d501d35a639e2902c95ba4e7492e68d60fc1f1c08654c39422abd3077a | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
Value,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
# SQL comments are forbidden in column aliases.
FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")
# Inspired from
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
def get_field_names_from_opts(opts):
if opts is None:
return set()
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain other than default fields (values(),
# subqueries...). Note that annotations go to annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
# Arbitrary limit for select_related to prevents infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
# A tuple that is a set of model field names and either True, if these are
# the fields to defer, or False if these are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(
self.select or self.annotation_select_mask or self.extra_select_mask
)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
if self.model:
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
        # of a related model, then that column might not be part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details).
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
                    # An expression that is not selected in the subquery.
if isinstance(expr, Col) or (
expr.contains_aggregate and not expr.is_summary
):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = "__col%d" % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
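    # Hedged illustration of rewrite_cols() (hypothetical model/relation
    # names): in a call like
    #   Book.objects.annotate(n=Count("chapter")).aggregate(Avg("n"))
    # the Avg references the "n" annotation, so get_aggregation() below wraps
    # the query in a subquery and rewrite_cols() ensures every column the
    # outer aggregate needs is actually selected by the inner query.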
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation
for alias, annotation in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or existing_annotations
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
            # Queries with distinct_fields need ordering, and when a limit is
            # applied we must take the slice from the ordered query. Otherwise
            # there's no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation
for annotation in existing_annotations
if getattr(annotation, "contains_aggregate", True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
relabels = {t: "subquery" for t in inner_query.alias_map}
relabels[None] = "subquery"
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(
relabels
)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count("*"), alias="__count", is_summary=True)
return obj.get_aggregation(using, ["__count"])["__count"]
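    # Usage sketch for get_count() (hypothetical model name): QuerySet.count()
    # delegates here; e.g. Entry.objects.filter(rating__gt=3).count() clones
    # the query, annotates COUNT(*) under the "__count" alias, and reads the
    # value back through get_aggregation().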
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not (q.distinct and q.is_sliced):
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
limit_combined = connections[
using
].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_annotation(Value(1), "a")
return q
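    # Rough shape of the SQL produced by exists() for a simple, unsliced
    # query (a sketch, not output copied from a backend):
    #   SELECT 1 AS "a" FROM ... WHERE ... LIMIT 1
    # The select clause is cleared, a constant 1 is selected under the alias
    # "a", and LIMIT 1 is applied when 'limit' is True.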
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
for option_name in options:
if (
not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
or "--" in option_name
):
raise ValueError(f"Invalid option name: {option_name!r}.")
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one, with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
        # If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in the AND
        # case a single row can't fulfill a condition like:
        #     revrel__col=1 & revrel__col=2
        # But there might be two different related rows matching this
        # condition. In the OR case a single True is enough, so a single row
        # is enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
        # Combine subquery aliases to ensure that alias relabelling properly
        # handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
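    # Hedged illustration of combine(): this is what backs the queryset & and
    # | operators, e.g. ("Entry", "a", and "b" are hypothetical names)
    #   Entry.objects.filter(a=1) | Entry.objects.filter(b=2)
    # merges the rhs query's joins and where clause into the lhs using the OR
    # connector.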
def _get_defer_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# All concrete fields that are not part of the defer mask must be
        # loaded. If a relational field is encountered, it gets added to the
        # mask so it's considered if `select_related` is in use, and the
        # cycle continues by recursively calling this function.
for field in opts.concrete_fields:
field_mask = mask.pop(field.name, None)
if field_mask is None:
select_mask.setdefault(field, {})
elif field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
field_select_mask = select_mask.setdefault(field, {})
related_model = field.remote_field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
# Remaining defer entries must be references to reverse relationships.
# The following code is expected to raise FieldError if it encounters
# a malformed defer entry.
for field_name, field_mask in mask.items():
if filtered_relation := self._filtered_relations.get(field_name):
relation = opts.get_field(filtered_relation.relation_name)
field_select_mask = select_mask.setdefault((field_name, relation), {})
field = relation.field
else:
field = opts.get_field(field_name).field
field_select_mask = select_mask.setdefault(field, {})
related_model = field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def _get_only_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# Only include fields mentioned in the mask.
for field_name, field_mask in mask.items():
field = opts.get_field(field_name)
field_select_mask = select_mask.setdefault(field, {})
if field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
related_model = field.remote_field.model._meta.concrete_model
self._get_only_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def get_select_mask(self):
"""
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
"""
field_names, defer = self.deferred_loading
if not field_names:
return {}
mask = {}
for field_name in field_names:
part_mask = mask
for part in field_name.split(LOOKUP_SEP):
part_mask = part_mask.setdefault(part, {})
opts = self.get_meta()
if defer:
return self._get_defer_select_mask(opts, mask)
return self._get_only_select_mask(opts, mask)
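    # Sketch for get_select_mask() (hypothetical model/field names):
    # Entry.objects.defer("body") stores deferred_loading == ({"body"}, True)
    # and goes through the defer branch, while Entry.objects.only("headline")
    # stores ({"headline"}, False) and goes through the only branch; both
    # produce nested {field: mask} dicts for the compiler.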
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
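    # Example for table_alias() (a sketch; actual numbering depends on the
    # query state): with the default alias prefix "T", the first reference to
    # "app_entry" uses the table name itself as the alias, while a forced
    # second join (create=True) gets a generated alias such as "T3", numbered
    # from the current size of alias_map.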
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Promote recursively the join type of given aliases and their children
        to an outer join. Only promote a join if it is nullable or the parent
        join is an outer join.
        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we currently have a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically; otherwise
        the promotion of a->b doesn't actually change anything in the query
        results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
        are demoting the b->c join in the chain a LOUTER b LOUTER c, then we
        must demote a->b automatically; otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
        Change the alias prefix to the next letter in the alphabet so that
        the other query's aliases and this query's aliases will not conflict.
        Even tables that previously had no alias will get an alias after this
        call. To prevent changing certain aliases, use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
exclude = {}
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
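    # Hedged illustration of prefix_gen() above: starting from the default
    # prefix "T", the candidates are "U", "V", ..., "Z" and then "AA", "AB",
    # and so on; the first candidate not already claimed by a subquery wins.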
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
elif self.model:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
else:
alias = None
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
]
else:
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in the base chain with no parents;
            # assign the new options object and skip to the next base in
            # that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def check_alias(self, alias):
if FORBIDDEN_ALIAS_PATTERN.search(alias):
raise ValueError(
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
self.check_alias(alias)
annotation = annotation.resolve_expression(
self, allow_joins=True, reuse=None, summarize=is_summary
)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
for query in self.combined_queries:
query.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
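    # Worked example for solve_lookup_type() (hypothetical field names): for
    # a filter key like "author__name__icontains", where "author" is a FK
    # with a "name" field, this returns (["icontains"], ["author", "name"],
    # False). If the key refers to an annotation, the annotation expression
    # is returned as the third element instead.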
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
        'lookups' is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookup_name)
)
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
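    # Hedged example for build_lookup(): build_lookup(["date", "gt"], lhs,
    # rhs) applies the "date" transform to lhs and then the "gt" lookup,
    # mirroring a filter such as created__date__gt=... (assuming a
    # DateTimeField, which registers the standard "date" transform).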
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
reuse_with_filtered_relation=False,
check_filterable=True,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookups[0])
)
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
        # (Consider the case where rel_a is LOUTER and rel_a__col=1 is added:
        # if rel_a doesn't produce any rows, then the whole condition must
        # fail. So, demotion is OK.)
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
if opts is None:
raise FieldDoesNotExist
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if opts is not None and model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
def setup_joins(
self,
names,
opts,
alias,
can_reuse=None,
allow_many=True,
reuse_with_filtered_relation=False,
):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
        If 'reuse_with_filtered_relation' is True, only the joins in
        'can_reuse' are reused, which is needed when computing
        FilteredRelation instances.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection,
reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
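    # Sketch for trim_joins() (hypothetical FK named "author"): a filter on
    # author__id only needs the local "author_id" column, so the direct join
    # to the author table is trimmed and the lookup targets the FK column on
    # the previous table. Reverse joins are never trimmed.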
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
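    # Illustrative sketch (not from the Django source): QuerySet slicing maps
    # onto set_limits(), and limits compose relative to the already-sliced
    # window. Model names are hypothetical:
    #
    #   qs = Article.objects.all()[5:]  # set_limits(5, None) -> low_mark=5
    #   qs = qs[:10]                    # set_limits(None, 10) -> high_mark=15
    #   Article.objects.all()[3:3]      # low_mark == high_mark -> set_empty()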
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
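    # Illustrative sketch (not from the Django source): this is the entry
    # point for QuerySet.distinct(*fields), which renders as DISTINCT ON and
    # is only supported on PostgreSQL. Model names are hypothetical:
    #
    #   Staff.objects.order_by("name").distinct("name")
    #   # SELECT DISTINCT ON ("staff"."name") ... ORDER BY "staff"."name"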
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup; a descriptive
                # FieldError is raised if it isn't valid.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
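    # Illustrative sketch (not from the Django source): the strings handled
    # above are what QuerySet.order_by() passes in. Model names are
    # hypothetical:
    #
    #   Entry.objects.order_by("-pub_date", "headline")  # "-" means DESC
    #   Entry.objects.order_by("?")                      # random ordering
    #   Entry.objects.order_by(Sum("rating"))            # FieldError: aggregate
    #                                                    # without annotate()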
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
self.check_alias(name)
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
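    # Illustrative sketch (not from the Django source): the placeholder
    # pairing above is exercised by QuerySet.extra(). Names are hypothetical:
    #
    #   Entry.objects.extra(
    #       select={"is_recent": "pub_date > %s"},
    #       select_params=(some_date,),
    #   )
    #
    # The single %s in the select entry is paired with some_date, so later
    # updates to self.extra keep SQL and parameters in sync.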
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
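    # Illustrative sketch (not from the Django source): defer() feeds
    # add_deferred_loading() and only() feeds add_immediate_loading().
    # Model names are hypothetical:
    #
    #   Entry.objects.defer("body")      # deferred_loading = ({"body"}, True)
    #   Entry.objects.only("headline")   # deferred_loading = ({"headline"}, False)
    #   Entry.objects.only("pk")         # "pk" is replaced by the concrete
    #                                    # primary key name (see above)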
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
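    # Illustrative sketch (not from the Django source): values() and
    # values_list() funnel through set_values(). Model names are hypothetical:
    #
    #   Entry.objects.values("headline")
    #   # values_select == ("headline",); extra/annotation masks cleared
    #
    #   Entry.objects.annotate(n=Count("id")).values("n")
    #   # "n" goes into the annotation mask rather than field_names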
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
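    # Illustrative note (not from the Django source): on a backend where
    # interprets_empty_strings_as_nulls is True (Oracle is the stock example),
    # a CharField(null=False) is still treated as nullable here because the
    # backend stores "" as NULL.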
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
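# Illustrative sketch (not from the Django source), doctest-style:
#
#   >>> get_order_dir("-created")
#   ('created', 'DESC')
#   >>> get_order_dir("created")
#   ('created', 'ASC')
#   >>> get_order_dir("created", default="DESC")
#   ('created', 'DESC')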
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
        Add a single vote per item to self.votes. The parameter can be any
        iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no rows or a null direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == OR and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == AND or (
self.effective_connector == OR and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
            # vote is enough). The demotion is OK: if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
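# Illustrative sketch (not from the Django source) of the voting above, with
# hypothetical relations rel_a and rel_b:
#
#   Model.objects.filter(Q(rel_a__col=1) | Q(rel_b__col=2))
#
# builds a JoinPromoter(OR, num_children=2, negated=False); each child votes
# once for its own join, so votes[rel_a] == votes[rel_b] == 1 < num_children
# and both joins are promoted to LEFT OUTER. ANDing rel_a__col__gte=0 onto the
# filter later votes rel_a inner again and demotes it back to INNER.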
531072f8a85eee8d89020e5e95ac060476c3126fc3b1a95c56775575f36a4cfb
import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
            # Backwards compatibility hack for setting query.group_by. Remove
            # when we have a public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
try:
sql, params = self.compile(expr)
except EmptyResultSet:
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
                    # combined queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
                    # combined queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
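    # Illustrative sketch (not from the Django source): compile() prefers a
    # vendor-specific as_<vendor>() method when the node defines one. A
    # hypothetical expression could provide, e.g.:
    #
    #   class Position(Func):
    #       function = "INSTR"
    #
    #       def as_postgresql(self, compiler, connection, **extra_context):
    #           # PostgreSQL spells this STRPOS rather than INSTR.
    #           return self.as_sql(
    #               compiler, connection, function="STRPOS", **extra_context
    #           )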
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
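    # Illustrative sketch (not from the Django source): the combinator
    # machinery above backs QuerySet.union()/intersection()/difference().
    # With hypothetical querysets qs1 and qs2:
    #
    #   qs1.union(qs2)            # combinator="union" -> "... UNION ..."
    #   qs1.union(qs2, all=True)  # only union supports the trailing " ALL"
    #   qs1.intersection(qs2)     # combinator="intersection"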
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
qual_aliases = set()
replacements = {}
expressions = list(self.qualify.leaves())
while expressions:
expr = expressions.pop()
if select_alias := (select.get(expr) or replacements.get(expr)):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
return result, list(inner_params) + qualify_params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases,
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (one Col per selected concrete
        field), which the compiler later renders as quoted SQL.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target
        columns on the same input, as their prefixes must match. Executing
        SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
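    # Illustrative sketch (not part of Django): the shape of one klass_info
    # entry built above for a forward relation, assuming hypothetical Book
    # and Author models where Book.author is a ForeignKey.
    #
    #     {
    #         "model": Author,
    #         "field": Book._meta.get_field("author"),
    #         "reverse": False,
    #         "local_setter": <caches the author on the book instance>,
    #         "remote_setter": <no-op unless the FK is unique>,
    #         "from_parent": False,
    #         "select_fields": [3, 4],  # indexes into the select list
    #         "related_klass_infos": [...],  # nested select_related levels
    #     }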
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
            Find the first selected column from a model. If none exists, the
            model isn't locked.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
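    # Illustrative sketch (not part of Django): a converter has the
    # three-argument signature applied above. A hypothetical converter that
    # upper-cases string values would look like:
    #
    #     def upper_converter(value, expression, connection):
    #         return value.upper() if isinstance(value, str) else value
    #
    # Backends contribute such callables via
    # DatabaseOperations.get_db_converters() and fields via
    # Field.get_db_converters().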
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
        # Some backends return 1-item tuples with strings, and others return
        # tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
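    # Illustrative usage sketch (not part of Django): explain_query() backs
    # QuerySet.explain(). Assuming a hypothetical Blog model:
    #
    #     print(Blog.objects.filter(title="My Blog").explain())
    #
    # Passing format="json" (where the backend supports it) routes
    # non-string row items through json.dumps, as above.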
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
            # A field of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
        # pair like ([sqls], [params lists]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
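    # Worked example (illustrative, not part of Django): for two objects and
    # two plain fields, assemble_as_sql() returns
    #
    #     placeholder_rows = (("%s", "%s"), ("%s", "%s"))
    #     param_rows = [["a", 1], ["b", 2]]
    #
    # i.e. one row of placeholders and one flattened row of params per object.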
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
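    # Illustrative sketch (not Django's verbatim output): when the delete
    # involves more than one alias, the fallback above emits SQL of the form
    #
    #     DELETE FROM "app_model"
    #     WHERE "app_model"."id" IN (SELECT ... FROM "app_model" JOIN ...)
    #
    # with the inner SELECT additionally materialized as a subquery on
    # backends where update_can_self_select is False (e.g. MySQL).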
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
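# Illustrative usage sketch (not part of Django): cursor_iter() yields rows
# in chunks, so callers iterate twice. The sentinel is normally taken from
# connection.features.empty_fetchmany_value.
#
#     for chunk in cursor_iter(cursor, [], None, 100):
#         for row in chunk:
#             ...  # process one result row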
|
9a3f803510d1f6b3850bbad82bbb31e732c417b12a2312ee46c75bfcc216eb51 | import _thread
import copy
import datetime
import logging
import threading
import time
import warnings
from collections import deque
from contextlib import contextmanager
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, DatabaseError, NotSupportedError
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
NO_DB_ALIAS = "__no_db__"
RAN_DB_VERSION_CHECK = set()
logger = logging.getLogger("django.db.backends.base")
# RemovedInDjango50Warning
def timezone_constructor(tzname):
if settings.USE_DEPRECATED_PYTZ:
import pytz
return pytz.timezone(tzname)
return zoneinfo.ZoneInfo(tzname)
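# Illustrative usage sketch (not part of Django): with USE_DEPRECATED_PYTZ
# off, this returns a zoneinfo.ZoneInfo instance.
#
#     tz = timezone_constructor("Europe/Paris")
#     datetime.datetime(2022, 1, 1, 12, 0, tzinfo=tz)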
class BaseDatabaseWrapper:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = "unknown"
display_name = "unknown"
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Stack of active 'atomic' blocks.
self.atomic_blocks = []
# Tracks if the outermost 'atomic' block should commit on exit,
        # i.e. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
self.health_check_enabled = False
self.health_check_done = False
# Thread-safety related attributes.
self._thread_sharing_lock = threading.Lock()
self._thread_sharing_count = 0
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func, robust) tuple, where sids is a set of
# the active savepoint IDs when this function was registered and robust
# specifies whether it's allowed for the function to fail.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"vendor={self.vendor!r} alias={self.alias!r}>"
)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Return a tzinfo of the database connection time zone.
This is only used when time zone support is enabled. When a datetime is
read from the database, it is always returned in this time zone.
When the database backend supports time zones, it doesn't matter which
time zone Django uses, as long as aware datetimes are used everywhere.
Other users connecting to the database can choose their own time zone.
When the database backend doesn't support time zones, the time zone
Django uses may be constrained by the requirements of other users of
the database.
"""
if not settings.USE_TZ:
return None
elif self.settings_dict["TIME_ZONE"] is None:
return datetime.timezone.utc
else:
return timezone_constructor(self.settings_dict["TIME_ZONE"])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict["TIME_ZONE"] is None:
return "UTC"
else:
return self.settings_dict["TIME_ZONE"]
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen)
)
return list(self.queries_log)
def get_database_version(self):
"""Return a tuple of the database's version."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_database_version() "
"method."
)
def check_database_version_supported(self):
"""
Raise an error if the database version isn't supported by this
version of Django.
"""
if (
self.features.minimum_database_version is not None
and self.get_database_version() < self.features.minimum_database_version
):
db_version = ".".join(map(str, self.get_database_version()))
min_db_version = ".".join(map(str, self.features.minimum_database_version))
raise NotSupportedError(
f"{self.display_name} {min_db_version} or later is required "
f"(found {db_version})."
)
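    # Worked example (illustrative): version tuples compare element-wise,
    # so with features.minimum_database_version = (10, 5), a database
    # reporting (10, 4) fails the check above, since (10, 4) < (10, 5).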
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_connection_params() "
"method"
)
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_new_connection() "
"method"
)
def init_connection_state(self):
"""Initialize the database connection settings."""
global RAN_DB_VERSION_CHECK
if self.alias not in RAN_DB_VERSION_CHECK:
self.check_database_version_supported()
RAN_DB_VERSION_CHECK.add(self.alias)
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a create_cursor() method"
)
# ##### Backend-specific methods for creating connections #####
@async_unsafe
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.atomic_blocks = []
self.needs_rollback = False
# Reset parameters defining when to close/health-check the connection.
self.health_check_enabled = self.settings_dict["CONN_HEALTH_CHECKS"]
max_age = self.settings_dict["CONN_MAX_AGE"]
self.close_at = None if max_age is None else time.monotonic() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# New connections are healthy.
self.health_check_done = True
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict["AUTOCOMMIT"])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict["TIME_ZONE"] is not None and not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is False."
% self.alias
)
@async_unsafe
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.close_if_health_check_failed()
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
@async_unsafe
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
@async_unsafe
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
@async_unsafe
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
@async_unsafe
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
@async_unsafe
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace("-", "")
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
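    # Illustrative usage sketch (not part of Django): the pattern
    # transaction.atomic() follows for nested blocks.
    #
    #     sid = connection.savepoint()
    #     try:
    #         ...  # inner block
    #     except Exception:
    #         connection.savepoint_rollback(sid)
    #         raise
    #     else:
    #         connection.savepoint_commit(sid)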
@async_unsafe
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func, robust)
for (sids, func, robust) in self.run_on_commit
if sid not in sids
]
@async_unsafe
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
@async_unsafe
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"
)
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(
self, autocommit, force_begin_transaction_with_broken_autocommit=False
):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
transaction, an explicit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.close_if_health_check_failed()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit
and not autocommit
and hasattr(self, "_start_transaction_under_autocommit")
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
else:
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active."
)
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
    # ##### Foreign key constraint checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
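    # Illustrative usage sketch (not part of Django): fixture loading wraps
    # inserts in this context manager so rows that reference each other can
    # be created in any order, then verifies the result afterwards.
    #
    #     with connection.constraint_checks_disabled():
    #         ...  # insert objects with forward references
    #     connection.check_constraints()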
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
        constraint checking. Should return True if the constraints were
        disabled and will need to be re-enabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method"
)
def close_if_health_check_failed(self):
"""Close existing connection if it fails a health check."""
if (
self.connection is None
or not self.health_check_enabled
or self.health_check_done
):
return
if not self.is_usable():
self.close()
self.health_check_done = True
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
self.health_check_done = False
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict["AUTOCOMMIT"]:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
self.health_check_done = True
else:
self.close()
return
if self.close_at is not None and time.monotonic() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
@property
def allow_thread_sharing(self):
with self._thread_sharing_lock:
return self._thread_sharing_count > 0
def inc_thread_sharing(self):
with self._thread_sharing_lock:
self._thread_sharing_count += 1
def dec_thread_sharing(self):
with self._thread_sharing_lock:
if self._thread_sharing_count <= 0:
raise RuntimeError(
"Cannot decrement the thread sharing count below zero."
)
self._thread_sharing_count -= 1
def validate_thread_sharing(self):
"""
        Validate that the connection isn't accessed by a thread other than the
        one that originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `inc_thread_sharing()`
method). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s." % (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@contextmanager
def _nodb_cursor(self):
"""
Return a cursor from an alternative connection to be used when there is
no need to access the main database, specifically for test db
creation/deletion. This also prevents the production database from
being exposed to potential child threads while (or after) the test
database is destroyed. Refs #10868, #17786, #16969.
"""
conn = self.__class__({**self.settings_dict, "NAME": None}, alias=NO_DB_ALIAS)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
"The SchemaEditorClass attribute of this database wrapper is still None"
)
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func, robust=False):
if not callable(func):
raise TypeError("on_commit()'s callback must be a callable.")
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func, robust))
elif not self.get_autocommit():
raise TransactionManagementError(
"on_commit() cannot be used in manual transaction management"
)
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() (%s).",
e,
exc_info=True,
)
else:
func()
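    # Illustrative usage sketch (not part of Django): the public entry point
    # for this method is django.db.transaction.on_commit().
    # `notify_search_index` is a hypothetical callable.
    #
    #     from django.db import transaction
    #
    #     with transaction.atomic():
    #         obj.save()
    #         transaction.on_commit(notify_search_index, robust=True)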
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
_, func, robust = current_run_on_commit.pop(0)
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() during "
f"transaction (%s).",
e,
exc_info=True,
)
else:
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
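    # Illustrative usage sketch (not part of Django): a wrapper receives
    # (execute, sql, params, many, context) and is responsible for calling
    # execute() itself. A hypothetical wrapper that blocks DELETE statements:
    #
    #     def block_deletes(execute, sql, params, many, context):
    #         if sql.lstrip().upper().startswith("DELETE"):
    #             raise RuntimeError("DELETE statements are blocked here.")
    #         return execute(sql, params, many, context)
    #
    #     with connection.execute_wrapper(block_deletes):
    #         ...  # queries here pass through block_deletes()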
def copy(self, alias=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
return type(self)(settings_dict, alias)
|
22d9ad97a2b22d9184ef5c6e0684f230d4daac38cd9b34984db607d9a6a1e00d | from django.core.exceptions import FieldDoesNotExist
from django.db import IntegrityError, connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.operations.fields import FieldOperation
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.functions import Abs
from django.db.transaction import atomic
from django.test import (
SimpleTestCase,
ignore_warnings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from django.utils.deprecation import RemovedInDjango51Warning
from .models import FoodManager, FoodQuerySet, UnicodeModel
from .test_base import OperationTestBase
class Mixin:
pass
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
    Each test checks the state change and then the corresponding database
    operation, both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures
here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
self.assertEqual(operation.migration_name_fragment, "pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["fields", "name"])
# And default manager not in set
operation = migrations.CreateModel(
"Foo", fields=[], managers=[("objects", models.Manager())]
)
definition = operation.deconstruct()
self.assertNotIn("managers", definition[2])
def test_create_model_with_duplicate_field_name(self):
with self.assertRaisesMessage(
ValueError, "Found duplicate value pink in CreateModel fields argument."
):
migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.TextField()),
("pink", models.IntegerField(default=1)),
],
)
def test_create_model_with_duplicate_base(self):
message = "Found duplicate value test_crmo.pony in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.Pony",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.pony",
),
)
message = (
"Found duplicate value migrations.unicodemodel in CreateModel bases "
"argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
UnicodeModel,
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.unicodemodel",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.UnicodeModel",
),
)
message = (
"Found duplicate value <class 'django.db.models.base.Model'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
models.Model,
models.Model,
),
)
message = (
"Found duplicate value <class 'migrations.test_operations.Mixin'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
Mixin,
Mixin,
),
)
def test_create_model_with_duplicate_manager_name(self):
with self.assertRaisesMessage(
ValueError,
"Found duplicate value objects in CreateModel managers argument.",
):
migrations.CreateModel(
"Pony",
fields=[],
managers=[
("objects", models.Manager()),
("objects", models.Manager()),
],
)
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards(
"test_crmoua", editor, project_state, new_state
)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables")),
],
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmomm", editor, new_state, project_state
)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_create_fk_models_to_pk_field_db_collation(self):
"""Creation of models with a FK to a PK with db_collation."""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_cfkmtopkfdbc"
operations = [
migrations.CreateModel(
"Pony",
[
(
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
),
],
)
]
project_state = self.apply_operations(app_label, ProjectState(), operations)
# ForeignKey.
new_state = project_state.clone()
operation = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# OneToOneField.
new_state = project_state.clone()
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony",
models.OneToOneField("Pony", models.CASCADE, primary_key=True),
),
("cuteness", models.IntegerField(default=1)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_shetlandpony", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony_ptr",
models.OneToOneField(
"test_crmoih.Pony",
models.CASCADE,
auto_created=True,
primary_key=True,
to_field="id",
serialize=False,
),
),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmoih", editor, new_state, project_state
)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crprmo", editor, new_state, project_state
)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crummo", editor, new_state, project_state
)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_constraint(self):
where = models.Q(pink__gt=2)
check_constraint = models.CheckConstraint(
check=where, name="test_constraint_pony_pink_gt_2"
)
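        # A CheckConstraint like the above should render as a table-level SQL
        # CHECK clause, roughly CHECK ("pink" > 2) (exact quoting and
        # placement vary by backend), which is what the INSERT below trips.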
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [check_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2]["options"]["constraints"], [check_constraint])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_boolean_expression_in_check_constraint(self):
app_label = "test_crmobechc"
rawsql_constraint = models.CheckConstraint(
check=models.expressions.RawSQL(
"price < %s", (1000,), output_field=models.BooleanField()
),
name=f"{app_label}_price_lt_1000_raw",
)
wrapper_constraint = models.CheckConstraint(
check=models.expressions.ExpressionWrapper(
models.Q(price__gt=500) | models.Q(price__lt=500),
output_field=models.BooleanField(),
),
name=f"{app_label}_price_neq_500_wrap",
)
operation = migrations.CreateModel(
"Product",
[
("id", models.AutoField(primary_key=True)),
("price", models.IntegerField(null=True)),
],
options={"constraints": [rawsql_constraint, wrapper_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Add table.
        self.assertTableNotExists(f"{app_label}_product")
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertTableExists(f"{app_label}_product")
insert_sql = f"INSERT INTO {app_label}_product (id, price) VALUES (%d, %d)"
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (1, 1000))
cursor.execute(insert_sql % (1, 999))
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (2, 500))
cursor.execute(insert_sql % (2, 499))
def test_create_model_with_partial_unique_constraint(self):
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
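        # A UniqueConstraint with a condition is implemented as a partial
        # unique index on backends that support them, roughly
        # CREATE UNIQUE INDEX ... ON ... ("pink") WHERE "weight" > 5
        # (syntax is a sketch); elsewhere the condition is simply not
        # enforced, which the feature-flag branch below accounts for.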
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [partial_unique_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# Test constraint works
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"], [partial_unique_constraint]
)
def test_create_model_with_deferred_unique_constraint(self):
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferrable_pink_constraint",
deferrable=models.Deferrable.DEFERRED,
)
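        # Deferrable.DEFERRED means the uniqueness check is postponed until
        # transaction commit (e.g. PostgreSQL's DEFERRABLE INITIALLY
        # DEFERRED), so a duplicate may exist transiently inside a
        # transaction as long as it is resolved before COMMIT.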
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [deferred_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[deferred_unique_constraint],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_create_model_with_covering_unique_constraint(self):
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
include=["weight"],
name="test_constraint_pony_pink_covering_weight",
)
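        # UniqueConstraint(include=...) creates a covering unique index: only
        # "pink" participates in the uniqueness check, while "weight" is
        # stored alongside it in the index (PostgreSQL's INCLUDE clause) so
        # it can be read without visiting the table.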
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [covering_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[covering_unique_constraint],
)
def test_create_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
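        # Managers passed to CreateModel are kept in state as (name,
        # instance) pairs, in order; each instance must be deconstructible
        # (in practice, defined with use_in_migrations = True) so it can be
        # serialized into the migration file.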
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
self.assertEqual(operation.migration_name_fragment, "delete_pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])
def test_delete_proxy_model(self):
"""
Tests the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlprmo", editor, new_state, project_state
)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
def test_delete_mti_model(self):
project_state = self.set_up_test_model("test_dlmtimo", mti_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ShetlandPony")
new_state = project_state.clone()
operation.state_forwards("test_dlmtimo", new_state)
self.assertIn(("test_dlmtimo", "shetlandpony"), project_state.models)
self.assertNotIn(("test_dlmtimo", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_dlmtimo", editor, project_state, new_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableNotExists("test_dlmtimo_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlmtimo", editor, new_state, project_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
self.assertEqual(operation.migration_name_fragment, "rename_pony_horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate forwards
new_state = project_state.clone()
atomic_rename = connection.features.supports_atomic_references_rename
new_state = self.apply_operations(
"test_rnmo", new_state, [operation], atomic=atomic_rename
)
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
new_state.models["test_rnmo", "rider"].fields["pony"].remote_field.model,
"test_rnmo.Horse",
)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate backwards
original_state = self.unapply_operations(
"test_rnmo", project_state, [operation], atomic=atomic_rename
)
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual(
original_state.models["test_rnmo", "rider"]
.fields["pony"]
.remote_field.model,
"Pony",
)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"old_name": "Pony", "new_name": "Horse"})
def test_rename_model_state_forwards(self):
"""
RenameModel operations shouldn't trigger the caching of rendered apps
on state without prior apps.
"""
state = ProjectState()
state.add_model(ModelState("migrations", "Foo", []))
operation = migrations.RenameModel("Foo", "Bar")
operation.state_forwards("migrations", state)
self.assertNotIn("apps", state.__dict__)
self.assertNotIn(("migrations", "foo"), state.models)
self.assertIn(("migrations", "bar"), state.models)
# Now with apps cached.
apps = state.apps
operation = migrations.RenameModel("Bar", "Foo")
operation.state_forwards("migrations", state)
self.assertIs(state.apps, apps)
self.assertNotIn(("migrations", "bar"), state.models)
self.assertIn(("migrations", "foo"), state.models)
def test_rename_model_with_self_referential_fk(self):
"""
Tests the RenameModel operation on model with self referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
"self",
new_state.models["test_rmwsrf", "horserider"]
.fields["friend"]
.remote_field.model,
)
HorseRider = new_state.apps.get_model("test_rmwsrf", "horserider")
self.assertIs(
HorseRider._meta.get_field("horserider").remote_field.model, HorseRider
)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKExists(
"test_rmwsrf_horserider",
["friend_id"],
("test_rmwsrf_horserider", "id"),
)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(
"test_rmwsrf", editor, new_state, project_state
)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)
def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model(
"test_rmwsc", related_model=True, mti_model=True
)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(
operation.describe(), "Rename model ShetlandPony to LittleHorse"
)
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"]
.fields["pony"]
.remote_field.model,
new_state.models["test_rmwsc", "rider"].fields["pony"].remote_field.model,
)
# Before running the migration we have a table for Shetland Pony, not
# Little Horse.
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")
)
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# but the Foreign keys still point at pony, not little horse
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")
)
def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"ReflexivePony",
fields=[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("self")),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)
def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Pony", "Pony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)
def test_rename_model_with_db_table_rename_m2m(self):
app_label = "test_rmwdbrm2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
options={"db_table": "pony"},
),
],
)
new_state = self.apply_operations(
app_label,
project_state,
operations=[migrations.RenameModel("Pony", "PinkPony")],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = new_state.apps.get_model(app_label, "PinkPony")
Rider = new_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Rider", "Rider2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)
def test_rename_m2m_through_model(self):
app_label = "test_rename_through"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"test_rename_through.Rider", models.CASCADE
),
),
(
"pony",
models.ForeignKey(
"test_rename_through.Pony", models.CASCADE
),
),
],
),
migrations.AddField(
"Pony",
"riders",
models.ManyToManyField(
"test_rename_through.Rider",
through="test_rename_through.PonyRider",
),
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
pony = Pony.objects.create()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("PonyRider", "PonyRider2"),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
pony = Pony.objects.first()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
self.assertEqual(Pony.objects.count(), 1)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(PonyRider.objects.count(), 2)
self.assertEqual(pony.riders.count(), 2)
def test_rename_m2m_model_after_rename_field(self):
"""RenameModel renames a many-to-many column after a RenameField."""
app_label = "test_rename_multiple"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=20)),
],
),
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"test_rename_multiple.Pony", models.CASCADE
),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
migrations.RenameField(
model_name="pony", old_name="name", new_name="fancy_name"
),
migrations.RenameModel(old_name="Rider", new_name="Jockey"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Jockey = project_state.apps.get_model(app_label, "Jockey")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
# No "no such column" error means the column was renamed correctly.
pony = Pony.objects.create(fancy_name="a good name")
jockey = Jockey.objects.create(pony=pony)
ponyrider = PonyRider.objects.create()
ponyrider.riders.add(jockey)
def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
self.assertEqual(operation.migration_name_fragment, "pony_height")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = new_state.models["test_adfl", "pony"].fields["height"]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adchfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
# If not properly quoted digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adtxtfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
# If not properly quoted digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adbinfl",
project_state,
[
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
# If not properly quoted digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
# SQLite returns buffer/memoryview, cast to bytes for checking.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
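        # "order" is a reserved word in SQL, so the generated ALTER TABLE
        # statement only works if the schema editor quotes the column name;
        # an unquoted identifier here would be a syntax error on most
        # backends.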
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_regr22168", editor, project_state, new_state
)
self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
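        # With preserve_default=False, the provided default (4) is only used
        # to backfill existing rows during the schema alteration; it is then
        # dropped from the field, so the in-state field reports NOT_PROVIDED.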
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = new_state.models["test_adflpd", "pony"].fields["height"]
self.assertEqual(field.default, models.NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["field", "model_name", "name", "preserve_default"]
)
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")
)
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adflmm", editor, new_state, project_state
)
self.assertTableNotExists("test_adflmm_pony_stables")
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field("stables").blank)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"stables",
models.ManyToManyField(
to="Stable", related_name="ponies", blank=True
),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field("stables").blank)
def test_repoint_field_m2m(self):
project_state = self.set_up_test_model(
"test_alflmm", second_model=True, third_model=True
)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"places",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"places",
models.ManyToManyField(to="Van", related_name="ponies"),
)
],
)
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()
def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations(
"test_rmflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations(
"test_rmflmm", project_state, operations=operations
)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations(
"test_rmflmmwt",
project_state,
operations=[
migrations.CreateModel(
"PonyStables",
fields=[
(
"pony",
models.ForeignKey("test_rmflmmwt.Pony", models.CASCADE),
),
(
"stable",
models.ForeignKey("test_rmflmmwt.Stable", models.CASCADE),
),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField(
"Stable",
related_name="ponies",
through="test_rmflmmwt.PonyStables",
),
),
],
)
self.assertTableExists("test_rmflmmwt_ponystables")
operations = [
migrations.RemoveField("Pony", "stables"),
migrations.DeleteModel("PonyStables"),
]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
self.assertEqual(operation.migration_name_fragment, "remove_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pink"})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(
operation.describe(), "Rename table for Pony to test_almota_pony_2"
)
self.assertEqual(operation.migration_name_fragment, "alter_pony_table")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony_2",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "table": "test_almota_pony_2"})
def test_alter_model_table_none(self):
"""
Tests the AlterModelTable operation if the table name is set to None.
"""
operation = migrations.AlterModelTable("Pony", None)
self.assertEqual(operation.describe(), "Rename table for Pony to (default)")
def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = "pony_foo"
project_state = self.set_up_test_model(
app_label, second_model=True, db_table=pony_db_table
)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable")
)
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
operation = migrations.AlterModelTable(name="pony", table=None)
operation.state_forwards(app_label, second_state)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
self.assertEqual(operation.describe(), "Alter field pink on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertIs(
project_state.models["test_alfl", "pony"].fields["pink"].null, False
)
self.assertIs(new_state.models["test_alfl", "pony"].fields["pink"].null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_add_db_column_noop(self):
"""
AlterField operation is a noop when adding only a db_column and the
column name is not changed.
"""
app_label = "test_afadbn"
project_state = self.set_up_test_model(app_label, related_model=True)
pony_table = "%s_pony" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Pony", "weight", models.FloatField(db_column="weight")
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "pony"].fields["weight"].db_column,
)
self.assertEqual(
new_state.models[app_label, "pony"].fields["weight"].db_column,
"weight",
)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
app_label, editor, new_state, project_state
)
self.assertColumnExists(pony_table, "weight")
rider_table = "%s_rider" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Rider",
"pony",
models.ForeignKey("Pony", models.CASCADE, db_column="pony_id"),
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "rider"].fields["pony"].db_column,
)
        self.assertEqual(
new_state.models[app_label, "rider"].fields["pony"].db_column,
"pony_id",
)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
                operation.database_backwards(
                    app_label, editor, new_state, project_state
                )
self.assertColumnExists(rider_table, "pony_id")
def test_alter_field_pk(self):
"""
The AlterField operation on primary keys (things like PostgreSQL's
SERIAL weirdness).
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.IntegerField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(
project_state.models["test_alflpk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpk", "pony"].fields["id"],
models.IntegerField,
)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpk", editor, new_state, project_state
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_pk_fk(self):
"""
Tests the AlterField operation on primary keys changes any FKs pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
project_state = self.apply_operations(
"test_alflpkfk",
project_state,
[
migrations.CreateModel(
"Stable",
fields=[
("ponies", models.ManyToManyField("Pony")),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable"),
),
],
)
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.FloatField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(
project_state.models["test_alflpkfk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpkfk", "pony"].fields["id"],
models.FloatField,
)
def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_pony"
)
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_rider"
)
if c.name == "pony_id"
][0]
m2m_fk_type, m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_pony_stables",
)
if c.name == "pony_id"
][0]
remote_m2m_fk_type, remote_m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_stable_ponies",
)
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_type, m2m_fk_type)
self.assertEqual(id_type, remote_m2m_fk_type)
self.assertEqual(id_null, fk_null)
self.assertEqual(id_null, m2m_fk_null)
self.assertEqual(id_null, remote_m2m_fk_null)
assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alflpkfk", editor, project_state, new_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpkfk", editor, new_state, project_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_alter_field_pk_fk_db_collation(self):
"""
AlterField operation of db_collation on primary keys changes any FKs
pointing to it.
"""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_alflpkfkdbc"
project_state = self.apply_operations(
app_label,
ProjectState(),
[
migrations.CreateModel(
"Pony",
[
("id", models.CharField(primary_key=True, max_length=10)),
],
),
migrations.CreateModel(
"Rider",
[
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
),
migrations.CreateModel(
"Stable",
[
("ponies", models.ManyToManyField("Pony")),
],
),
],
)
# State alteration.
operation = migrations.AlterField(
"Pony",
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Database alteration.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_pony", "id", collation)
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
self.assertColumnCollation(f"{app_label}_stable_ponies", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
def test_alter_field_pk_mti_fk(self):
app_label = "test_alflpkmtifk"
project_state = self.set_up_test_model(app_label, mti_model=True)
project_state = self.apply_operations(
app_label,
project_state,
[
migrations.CreateModel(
"ShetlandRider",
fields=[
(
"pony",
models.ForeignKey(
f"{app_label}.ShetlandPony", models.CASCADE
),
),
],
),
],
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)
def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]
def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
mti_id_type = _get_column_id_type(cursor, "shetlandrider", "pony_id")
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, mti_id_type)
assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)
def test_alter_field_pk_mti_and_fk_to_base(self):
app_label = "test_alflpkmtiftb"
project_state = self.set_up_test_model(
app_label,
mti_model=True,
related_model=True,
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)
def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]
def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
fk_id_type = _get_column_id_type(cursor, "rider", "pony_id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, fk_id_type)
assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self):
app_label = "test_alflrsfkwtflttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="code"
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])
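        # After AlterField, the FK column must mirror its new target: the
        # Pony.rider_id column should match Rider.code in both type and
        # NULL-ness.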
        code_type, code_null = [
            (c.type_code, c.null_ok)
            for c in self.get_table_description("%s_rider" % app_label)
            if c.name == "code"
        ][0]
        fk_type, fk_null = [
            (c.type_code, c.null_ok)
            for c in self.get_table_description("%s_pony" % app_label)
            if c.name == "rider_id"
        ][0]
        self.assertEqual(code_type, fk_type)
        self.assertEqual(code_null, fk_null)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_fk_with_to_field_related_name_target_type_change(
self,
):
app_label = "test_alflrsfkwtflrnttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.PositiveIntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label,
models.CASCADE,
to_field="code",
related_name="+",
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])
def test_alter_field_reloads_state_on_fk_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "id", models.CharField(primary_key=True, max_length=99)
),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
)
def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_with_to_field_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="slug"
),
),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"%s.Pony" % app_label, models.CASCADE, to_field="slug"
),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "slug", models.CharField(unique=True, max_length=99)
),
migrations.AlterField(
"Pony", "slug", models.CharField(unique=True, max_length=99)
),
],
)
def test_rename_field_reloads_state_on_fk_target_changes(self):
"""
If RenameField doesn't reload state appropriately, the AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_rename_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameField("Rider", "id", "id2"),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
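            # Some backends (e.g. older SQLite) can't rename a column that
            # other tables reference inside an atomic transaction, so honor
            # the feature flag.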
atomic=connection.features.supports_atomic_references_rename,
)
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl")
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
self.assertEqual(operation.migration_name_fragment, "rename_pink_pony_blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", new_state.models["test_rnfl", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].fields)
# Rename field.
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "old_name": "pink", "new_name": "blue"},
)
def test_rename_field_unique_together(self):
project_state = self.set_up_test_model("test_rnflut", unique_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflut", new_state)
# unique_together has the renamed column.
self.assertIn(
"blue",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
self.assertNotIn(
"pink",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
# Rename field.
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflut", editor, project_state, new_state)
self.assertColumnExists("test_rnflut_pony", "blue")
self.assertColumnNotExists("test_rnflut_pony", "pink")
# The unique constraint has been ported over.
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_rnflut_pony")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflut", editor, new_state, project_state
)
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_field_index_together(self):
project_state = self.set_up_test_model("test_rnflit", index_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflit", new_state)
self.assertIn("blue", new_state.models["test_rnflit", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnflit", "pony"].fields)
# index_together has the renamed column.
self.assertIn(
"blue", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
self.assertNotIn(
"pink", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
# Rename field.
self.assertColumnExists("test_rnflit_pony", "pink")
self.assertColumnNotExists("test_rnflit_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflit", editor, project_state, new_state)
self.assertColumnExists("test_rnflit_pony", "blue")
self.assertColumnNotExists("test_rnflit_pony", "pink")
# The index constraint has been ported over.
self.assertIndexExists("test_rnflit_pony", ["weight", "blue"])
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflit", editor, new_state, project_state
)
self.assertIndexExists("test_rnflit_pony", ["weight", "pink"])
def test_rename_field_with_db_column(self):
project_state = self.apply_operations(
"test_rfwdbc",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(db_column="db_field")),
(
"fk_field",
models.ForeignKey(
"Pony",
models.CASCADE,
db_column="db_fk_field",
),
),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "renamed_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn("renamed_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertNotIn("field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "fk_field", "renamed_fk_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn(
"renamed_fk_field", new_state.models["test_rfwdbc", "pony"].fields
)
self.assertNotIn("fk_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
def test_rename_field_case(self):
project_state = self.apply_operations(
"test_rfmx",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField()),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "FiElD")
operation.state_forwards("test_rfmx", new_state)
self.assertIn("FiElD", new_state.models["test_rfmx", "pony"].fields)
self.assertColumnExists("test_rfmx_pony", "field")
with connection.schema_editor() as editor:
operation.database_forwards("test_rfmx", editor, project_state, new_state)
self.assertColumnExists(
"test_rfmx_pony",
connection.introspection.identifier_converter("FiElD"),
)
with connection.schema_editor() as editor:
operation.database_backwards("test_rfmx", editor, new_state, project_state)
self.assertColumnExists("test_rfmx_pony", "field")
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState("app", "model", []))
with self.assertRaisesMessage(
FieldDoesNotExist, "app.model has no field named 'field'"
):
migrations.RenameField("model", "field", "new_field").state_forwards(
"app", state
)
def test_rename_referenced_field_state_forward(self):
state = ProjectState()
state.add_model(
ModelState(
"app",
"Model",
[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(unique=True)),
],
)
)
state.add_model(
ModelState(
"app",
"OtherModel",
[
("id", models.AutoField(primary_key=True)),
(
"fk",
models.ForeignKey("Model", models.CASCADE, to_field="field"),
),
(
"fo",
models.ForeignObject(
"Model",
models.CASCADE,
from_fields=("fk",),
to_fields=("field",),
),
),
],
)
)
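        # Renaming the referenced field must update every reference in the
        # state: the FK's remote field name and the from_fields/to_fields on
        # both the ForeignKey and the ForeignObject.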
operation = migrations.RenameField("Model", "field", "renamed")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].remote_field.field_name,
"renamed",
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].from_fields, ["self"]
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].to_fields, ("renamed",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields, ("fk",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
operation = migrations.RenameField("OtherModel", "fk", "renamed_fk")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"]
.fields["renamed_fk"]
.remote_field.field_name,
"renamed",
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].from_fields,
("self",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].to_fields,
("renamed",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields,
("renamed_fk",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_unique_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
project_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alunto", editor, project_state, new_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_alunto_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alunto", editor, new_state, project_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "unique_together": {("pink", "weight")}}
)
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_pk_field(self):
app_label = "test_rutopkf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[("id", models.AutoField(primary_key=True))],
options={"unique_together": {("id",)}},
),
],
)
table_name = f"{app_label}_pony"
pk_constraint_name = f"{table_name}_pkey"
unique_together_constraint_name = f"{table_name}_id_fb61f881_uniq"
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_unique_field(self):
app_label = "test_rutouf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=30, unique=True)),
],
options={"unique_together": {("name",)}},
),
],
)
table_name = f"{app_label}_pony"
unique_constraint_name = f"{table_name}_name_key"
unique_together_constraint_name = f"{table_name}_name_694f3b9f_uniq"
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
def test_add_index(self):
"""
Test the AddIndex operation.
"""
project_state = self.set_up_test_model("test_adin")
msg = (
"Indexes passed to AddIndex operations require a name argument. "
"<Index: fields=['pink']> doesn't have one."
)
with self.assertRaisesMessage(ValueError, msg):
migrations.AddIndex("Pony", models.Index(fields=["pink"]))
index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adin_pony_pink_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_adin", new_state)
# Test the database alteration
self.assertEqual(
len(new_state.models["test_adin", "pony"].options["indexes"]), 1
)
self.assertIndexNotExists("test_adin_pony", ["pink"])
with connection.schema_editor() as editor:
operation.database_forwards("test_adin", editor, project_state, new_state)
self.assertIndexExists("test_adin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adin", editor, new_state, project_state)
self.assertIndexNotExists("test_adin_pony", ["pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
def test_remove_index(self):
"""
Test the RemoveIndex operation.
"""
project_state = self.set_up_test_model("test_rmin", multicol_index=True)
self.assertTableExists("test_rmin_pony")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
operation = migrations.RemoveIndex("Pony", "pony_test_idx")
self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_rmin", new_state)
# Test the state alteration
self.assertEqual(
len(new_state.models["test_rmin", "pony"].options["indexes"]), 0
)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_rmin", editor, project_state, new_state)
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmin", editor, new_state, project_state)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pony_test_idx"})
        # Also test dropping an indexed field (SQLite table-remake issue).
operations = [
migrations.RemoveIndex("Pony", "pony_test_idx"),
migrations.RemoveField("Pony", "pink"),
]
self.assertColumnExists("test_rmin_pony", "pink")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test database alteration
new_state = project_state.clone()
self.apply_operations("test_rmin", new_state, operations=operations)
self.assertColumnNotExists("test_rmin_pony", "pink")
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
self.unapply_operations("test_rmin", project_state, operations=operations)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
def test_rename_index(self):
app_label = "test_rnin"
project_state = self.set_up_test_model(app_label, index=True)
table_name = app_label + "_pony"
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_name="pony_pink_idx"
)
self.assertEqual(
operation.describe(),
"Rename index pony_pink_idx on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_pink_idx_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
expected_queries = 1 if connection.features.can_rename_index else 2
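        # Backends with native index renaming need one query; the rest drop
        # and recreate the index (two queries).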
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, "pony_pink_idx")
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reversal.
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"old_name": "pony_pink_idx",
"new_name": "new_pony_test_idx",
},
)
def test_rename_index_arguments(self):
msg = "RenameIndex.old_name and old_fields are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex(
"Pony",
new_name="new_idx_name",
old_name="old_idx_name",
old_fields=("weight", "pink"),
)
msg = "RenameIndex requires one of old_name and old_fields arguments to be set."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex("Pony", new_name="new_idx_name")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_unnamed_index(self):
app_label = "test_rninui"
project_state = self.set_up_test_model(app_label, index_together=True)
table_name = app_label + "_pony"
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
self.assertEqual(
operation.describe(),
"Rename unnamed index for ('weight', 'pink') on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_weight_pink_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reverse is a no-op.
with connection.schema_editor() as editor, self.assertNumQueries(0):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Reapplying is a no-op: RenameIndex does nothing when the old and
        # new names match.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"new_name": "new_pony_test_idx",
"old_fields": ("weight", "pink"),
},
)
def test_rename_index_unknown_unnamed_index(self):
app_label = "test_rninuui"
project_state = self.set_up_test_model(app_label)
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
msg = "Found wrong number (0) of indexes for test_rninuui_pony(weight, pink)."
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
operation.database_forwards(app_label, editor, project_state, new_state)
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model("test_adinsf")
index = models.Index(fields=["pink"], name="test_adinsf_pony_pink_idx")
old_model = project_state.apps.get_model("test_adinsf", "Pony")
new_state = project_state.clone()
operation = migrations.AddIndex("Pony", index)
operation.state_forwards("test_adinsf", new_state)
new_model = new_state.apps.get_model("test_adinsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model("test_rminsf")
index = models.Index(fields=["pink"], name="test_rminsf_pony_pink_idx")
migrations.AddIndex("Pony", index).state_forwards("test_rminsf", project_state)
old_model = project_state.apps.get_model("test_rminsf", "Pony")
new_state = project_state.clone()
operation = migrations.RemoveIndex("Pony", "test_rminsf_pony_pink_idx")
operation.state_forwards("test_rminsf", new_state)
new_model = new_state.apps.get_model("test_rminsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_rename_index_state_forwards(self):
app_label = "test_rnidsf"
project_state = self.set_up_test_model(app_label, index=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_name="pony_pink_idx"
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_state_forwards_unnamed_index(self):
app_label = "test_rnidsfui"
project_state = self.set_up_test_model(app_label, index_together=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_fields=("weight", "pink")
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.index_together, tuple())
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@skipUnlessDBFeature("supports_expression_indexes")
def test_add_func_index(self):
app_label = "test_addfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
index = models.Index(Abs("weight"), name=index_name)
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_addfuncin_pony_abs_idx on Abs(F(weight)) on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_addfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 1)
self.assertIndexNameNotExists(table_name, index_name)
# Add index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
@skipUnlessDBFeature("supports_expression_indexes")
def test_remove_func_index(self):
app_label = "test_rmfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[
models.Index(Abs("weight"), name=index_name),
],
)
self.assertTableExists(table_name)
self.assertIndexNameExists(table_name, index_name)
operation = migrations.RemoveIndex("Pony", index_name)
self.assertEqual(
operation.describe(),
"Remove index test_rmfuncin_pony_abs_idx from Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 0)
# Remove index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": index_name})
@skipUnlessDBFeature("supports_expression_indexes")
def test_alter_field_with_func_index(self):
app_label = "test_alfuncin"
index_name = f"{app_label}_pony_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[models.Index(Abs("pink"), name=index_name)],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
def test_alter_field_with_index(self):
"""
        Test the AlterField operation with an index to ensure indexes created
        via Meta.indexes aren't dropped when SQLite remakes the table.
"""
project_state = self.set_up_test_model("test_alflin", index=True)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflin", new_state)
# Test the database alteration
self.assertColumnNotNull("test_alflin_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alflin", editor, project_state, new_state)
# Index hasn't been dropped
self.assertIndexExists("test_alflin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflin", editor, new_state, project_state
)
# Ensure the index is still there
self.assertIndexExists("test_alflin_pony", ["pink"])
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter index_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_index_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(
len(
project_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
1,
)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alinto", editor, new_state, project_state
)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "index_together": {("pink", "weight")}}
)
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter index_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together_remove_with_unique_together(self):
app_label = "test_alintoremove_wunto"
table_name = "%s_pony" % app_label
project_state = self.set_up_test_model(app_label, unique_together=True)
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
# Add index together.
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ["pink", "weight"])
# Remove index together.
project_state = new_state
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNotExists(table_name, ["pink", "weight"])
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint(self):
project_state = self.set_up_test_model("test_addconstraint")
gt_check = models.Q(pink__gt=2)
gt_constraint = models.CheckConstraint(
check=gt_check, name="test_add_constraint_pony_pink_gt_2"
)
gt_operation = migrations.AddConstraint("Pony", gt_constraint)
self.assertEqual(
gt_operation.describe(),
"Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"pony_test_add_constraint_pony_pink_gt_2",
)
# Test the state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
1,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Add another one.
lt_check = models.Q(pink__lt=100)
lt_constraint = models.CheckConstraint(
check=lt_check, name="test_add_constraint_pony_pink_lt_100"
)
lt_operation = migrations.AddConstraint("Pony", lt_constraint)
lt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
2,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 2)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_addconstraint", editor, new_state, project_state
)
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"model_name": "Pony", "constraint": gt_constraint}
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_percent_escaping(self):
app_label = "add_constraint_string_quoting"
operations = [
migrations.CreateModel(
"Author",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("surname", models.CharField(max_length=100, default="")),
("rebate", models.CharField(max_length=100)),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
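        # Constraint SQL is rendered with inlined values, so "%" handling has
        # to survive the backend's parameter-interpolation rules.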
# "%" generated in startswith lookup should be escaped in a way that is
# considered a leading wildcard.
check = models.Q(name__startswith="Albert")
constraint = models.CheckConstraint(check=check, name="name_constraint")
operation = migrations.AddConstraint("Author", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Artur")
# Literal "%" should be escaped in a way that is not a considered a
# wildcard.
check = models.Q(rebate__endswith="%")
constraint = models.CheckConstraint(check=check, name="rebate_constraint")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", rebate="10$")
author = Author.objects.create(name="Albert", rebate="10%")
self.assertEqual(Author.objects.get(), author)
        # "%" literals baked into the right-hand side must not be mistaken
        # for parameter-interpolation placeholders.
check = ~models.Q(surname__startswith=models.F("name"))
constraint = models.CheckConstraint(check=check, name="name_constraint_rhs")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", surname="Alberto")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_or_constraint(self):
app_label = "test_addorconstraint"
constraint_name = "add_constraint_or"
from_state = self.set_up_test_model(app_label)
check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
constraint = models.CheckConstraint(check=check, name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Pony = to_state.apps.get_model(app_label, "Pony")
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=2, weight=3.0)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=3, weight=1.0)
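        # These rows satisfy the constraint: either weight < 0, or both
        # pink > 2 and weight > 2.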
Pony.objects.bulk_create(
[
Pony(pink=3, weight=-1.0),
Pony(pink=1, weight=-1.0),
Pony(pink=3, weight=3.0),
]
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_combinable(self):
app_label = "test_addconstraint_combinable"
operations = [
migrations.CreateModel(
"Book",
fields=[
("id", models.AutoField(primary_key=True)),
("read", models.PositiveIntegerField()),
("unread", models.PositiveIntegerField()),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
constraint = models.CheckConstraint(
check=models.Q(read=(100 - models.F("unread"))),
name="test_addconstraint_combinable_sum_100",
)
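        # The check combines expressions: read must equal 100 - unread.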
operation = migrations.AddConstraint("Book", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Book = to_state.apps.get_model(app_label, "Book")
with self.assertRaises(IntegrityError), transaction.atomic():
Book.objects.create(read=70, unread=10)
Book.objects.create(read=70, unread=30)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_remove_constraint(self):
project_state = self.set_up_test_model(
"test_removeconstraint",
constraints=[
models.CheckConstraint(
check=models.Q(pink__gt=2),
name="test_remove_constraint_pony_pink_gt_2",
),
models.CheckConstraint(
check=models.Q(pink__lt=100),
name="test_remove_constraint_pony_pink_lt_100",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_gt_2"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"remove_pony_test_remove_constraint_pony_pink_gt_2",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
1,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=1, weight=1.0).delete()
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Remove the other one.
lt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_lt_100"
)
lt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
0,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=100, weight=1.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removeconstraint", editor, new_state, project_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "name": "test_remove_constraint_pony_pink_gt_2"},
)
def test_add_partial_unique_constraint(self):
project_state = self.set_up_test_model("test_addpartialuniqueconstraint")
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
operation = migrations.AddConstraint("Pony", partial_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq "
"on model Pony",
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_addpartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_addpartialuniqueconstraint", "pony"].options[
"constraints"
]
),
1,
)
Pony = new_state.apps.get_model("test_addpartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_addpartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint works
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_addpartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": partial_unique_constraint},
)
def test_remove_partial_unique_constraint(self):
project_state = self.set_up_test_model(
"test_removepartialuniqueconstraint",
constraints=[
models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_constraint_pony_pink_for_weight_gt_5_uniq"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from "
"model Pony",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removepartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removepartialuniqueconstraint", "pony"].options[
"constraints"
]
),
0,
)
Pony = new_state.apps.get_model("test_removepartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removepartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
Pony.objects.create(pink=1, weight=7.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removepartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint works
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "test_constraint_pony_pink_for_weight_gt_5_uniq",
},
)
def test_add_deferred_unique_constraint(self):
app_label = "test_adddeferred_uc"
project_state = self.set_up_test_model(app_label)
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_add",
deferrable=models.Deferrable.DEFERRED,
)
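        # DEFERRED: uniqueness is checked at transaction commit rather than
        # per statement, so a temporary duplicate inside atomic() is allowed.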
operation = migrations.AddConstraint("Pony", deferred_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint deferred_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": deferred_unique_constraint},
)
def test_remove_deferred_unique_constraint(self):
app_label = "test_removedeferred_uc"
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_rm",
deferrable=models.Deferrable.DEFERRED,
)
project_state = self.set_up_test_model(
app_label, constraints=[deferred_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", deferred_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint deferred_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_deferrable_unique_constraints:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "deferred_pink_constraint_rm",
},
)
def test_add_covering_unique_constraint(self):
app_label = "test_addcovering_uc"
project_state = self.set_up_test_model(app_label)
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_add",
include=["weight"],
)
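        # include=["weight"] makes this a covering constraint: "weight" is
        # stored in the index but plays no part in the uniqueness check.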
operation = migrations.AddConstraint("Pony", covering_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint covering_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": covering_unique_constraint},
)
def test_remove_covering_unique_constraint(self):
app_label = "test_removecovering_uc"
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_rm",
include=["weight"],
)
project_state = self.set_up_test_model(
app_label, constraints=[covering_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", covering_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint covering_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_covering_indexes:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "covering_pink_constraint_rm",
},
)
def test_alter_field_with_func_unique_constraint(self):
app_label = "test_alfuncuc"
constraint_name = f"{app_label}_pony_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint("pink", "weight", name=constraint_name)
],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
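        # Altering a column used by the functional unique constraint must
        # keep the underlying index alive on backends that support
        # expression indexes.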
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
def test_add_func_unique_constraint(self):
app_label = "test_adfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
constraint = models.UniqueConstraint(Abs("weight"), name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_adfuncuc_pony_abs_uq on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
self.assertIndexNameNotExists(table_name, constraint_name)
# Add constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = new_state.apps.get_model(app_label, "Pony")
Pony.objects.create(weight=4.0)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": constraint},
)
def test_remove_func_unique_constraint(self):
app_label = "test_rmfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint(Abs("weight"), name=constraint_name),
],
)
self.assertTableExists(table_name)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
operation = migrations.RemoveConstraint("Pony", constraint_name)
self.assertEqual(
operation.describe(),
"Remove constraint test_rmfuncuc_pony_abs_uq from model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Remove constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=-4.0).delete()
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": constraint_name})
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions(
"Pony", {"permissions": [("can_groom", "Can groom")]}
)
self.assertEqual(operation.describe(), "Change Meta options on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_options")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
0,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
1,
)
self.assertEqual(
new_state.models["test_almoop", "pony"].options["permissions"][0][0],
"can_groom",
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"name": "Pony", "options": {"permissions": [("can_groom", "Can groom")]}},
)
def test_alter_model_options_emptying(self):
"""
The AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
1,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
0,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "options": {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(
operation.describe(), "Set order_with_respect_to on Rider to pony"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_rider_order_with_respect_to",
)
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertIsNone(
project_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
)
)
self.assertEqual(
new_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
),
"pony",
)
# Make sure there's no matching index
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(
weight=50
)
rider1 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider1.friend = rider1
rider1.save()
rider2 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider2.friend = rider2
rider2.save()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alorwrtto", editor, project_state, new_state
)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
updated_riders = new_state.apps.get_model(
"test_alorwrtto", "Rider"
).objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alorwrtto", editor, new_state, project_state
)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Rider", "order_with_respect_to": "pony"}
)
def test_alter_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
self.assertEqual(operation.describe(), "Change managers on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_managers")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
rendered_state = new_state.apps
model = rendered_state.get_model("test_almoma", "pony")
self.assertIsInstance(model.food_qs, models.Manager)
self.assertIsInstance(model.food_mgr, FoodManager)
self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
def test_alter_model_managers_emptying(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
def test_alter_fk(self):
"""
Creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.ForeignKey("Pony", models.CASCADE, editable=False),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards(
"test_alfk", editor, project_state, create_state
)
alter_operation.database_forwards(
"test_alfk", editor, create_state, alter_state
)
def test_alter_fk_non_fk(self):
"""
Altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state(
"test_afknfk", operation, related_model=True
)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_afknfk", editor, new_state, project_state
)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on
# SQLite and MySQL respectively.
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' "
"WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' "
"WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[
migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(
len(new_state.models["test_runsql", "somethingelse"].fields), 1
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards(
"test_runsql", editor, project_state, new_state
)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunSQL("SELECT 1 FROM void;", elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
[
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);",
["Ponies"],
],
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);",
(
3,
"Python",
),
),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
(
"DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;",
[3, "Python"],
),
],
)
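        # Each statement above is either a bare string, a [sql, params]
        # list, or a (sql, params) tuple; None means "no parameters".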
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
def test_run_sql_params_invalid(self):
"""
#23426 - RunSQL should fail when a list of statements with an incorrect
number of tuples is given.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[["INSERT INTO foo (bar) VALUES ('buz');"]],
# backwards
(("DELETE FROM foo WHERE bar = 'buz';", "invalid", "parameter count"),),
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
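        # RunSQL.noop executes no SQL in either direction, keeping the
        # operation reversible without touching the database.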
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_sql_add_missing_semicolon_on_collect_sql(self):
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
tests = [
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1);\n",
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1)\n",
]
for sql in tests:
with self.subTest(sql=sql):
                operation = migrations.RunSQL(sql, migrations.RunSQL.noop)
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
collected_sql = "\n".join(editor.collected_sql)
self.assertEqual(collected_sql.count(";"), 1)
def test_run_python(self):
"""
        Tests the RunPython operation.
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(
inner_method, reverse_code=inner_method_reverse
)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
# Now test we can't use a string
with self.assertRaisesMessage(
ValueError, "RunPython must be supplied with a callable"
):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but
# without reverse_code set.
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
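        # The callable receives the historical app registry, so models must
        # be fetched from it instead of being imported directly.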
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6
)
self.assertEqual(
project_state.apps.get_model(
"test_runpython", "ShetlandPony"
).objects.count(),
2,
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunPython(inner_method, elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
def test_run_python_atomic(self):
"""
Tests the RunPython operation correctly handles the "atomic" keyword
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
# Verify atomicity when applying.
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method)
]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)
]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Reset object count to zero and verify atomicity when unapplying.
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.all().delete()
# On a fully-transactional database, both versions rollback.
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation leaves a row there.
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Verify deconstruction.
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
def test_run_python_related_assignment(self):
"""
#24282 - Model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author", models.CASCADE)),
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
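        # Adding a field to Author must also rebuild the rendered Book
        # model, which holds the FK; otherwise inner_method would create a
        # Book against a stale Author class (#24282).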
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards(
"test_books", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards(
"test_books", editor, project_state, new_state
)
def test_model_with_bigautofield(self):
"""
A model with BigAutoField can be created.
"""
def create_data(models, schema_editor):
Author = models.get_model("test_author", "Author")
Book = models.get_model("test_book", "Book")
author1 = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author1)
Book.objects.create(id=2**33, title="A farewell to arms", author=author1)
author2 = Author.objects.create(id=2**33, name="Remarque")
Book.objects.create(title="All quiet on the western front", author=author2)
Book.objects.create(title="Arc de Triomphe", author=author2)
create_author = migrations.CreateModel(
"Author",
[
("id", models.BigAutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.BigAutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
(
"author",
models.ForeignKey(
to="test_author.Author", on_delete=models.CASCADE
),
),
],
options={},
)
fill_data = migrations.RunPython(create_data)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_author", new_state)
create_author.database_forwards(
"test_author", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_book", new_state)
create_book.database_forwards("test_book", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_data.state_forwards("fill_data", new_state)
fill_data.database_forwards("fill_data", editor, project_state, new_state)
def _test_autofield_foreignfield_growth(
self, source_field, target_field, target_value
):
"""
A field may be migrated in the following ways:
- AutoField to BigAutoField
- SmallAutoField to AutoField
- SmallAutoField to BigAutoField
"""
def create_initial_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog = Blog.objects.create(name="web development done right")
Article.objects.create(name="Frameworks", blog=blog)
Article.objects.create(name="Programming Languages", blog=blog)
def create_big_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog2 = Blog.objects.create(name="Frameworks", id=target_value)
Article.objects.create(name="Django", blog=blog2)
Article.objects.create(id=target_value, name="Django2", blog=blog2)
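        # target_value is chosen to exceed the range of source_field, so
        # these rows can only be inserted after both AlterField operations
        # below have run.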
create_blog = migrations.CreateModel(
"Blog",
[
("id", source_field(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_article = migrations.CreateModel(
"Article",
[
("id", source_field(primary_key=True)),
(
"blog",
models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE),
),
("name", models.CharField(max_length=100)),
("data", models.TextField(default="")),
],
options={},
)
fill_initial_data = migrations.RunPython(
create_initial_data, create_initial_data
)
fill_big_data = migrations.RunPython(create_big_data, create_big_data)
grow_article_id = migrations.AlterField(
"Article", "id", target_field(primary_key=True)
)
grow_blog_id = migrations.AlterField(
"Blog", "id", target_field(primary_key=True)
)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_blog.state_forwards("test_blog", new_state)
create_blog.database_forwards("test_blog", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_article.state_forwards("test_article", new_state)
create_article.database_forwards(
"test_article", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_initial_data.state_forwards("fill_initial_data", new_state)
fill_initial_data.database_forwards(
"fill_initial_data", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_article_id.state_forwards("test_article", new_state)
grow_article_id.database_forwards(
"test_article", editor, project_state, new_state
)
state = new_state.clone()
article = state.apps.get_model("test_article.Article")
self.assertIsInstance(article._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_blog_id.state_forwards("test_blog", new_state)
grow_blog_id.database_forwards(
"test_blog", editor, project_state, new_state
)
state = new_state.clone()
blog = state.apps.get_model("test_blog.Blog")
self.assertIsInstance(blog._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_big_data.state_forwards("fill_big_data", new_state)
fill_big_data.database_forwards(
"fill_big_data", editor, project_state, new_state
)
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
2**33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
2**22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
2**33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(
migrations.RunPython.noop, migrations.RunPython.noop
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;",
)
state_operation = migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation], database_operations=[database_operation]
)
self.assertEqual(
operation.describe(), "Custom state/database change combination"
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(
len(
new_state.models[
"test_separatedatabaseandstate", "somethingelse"
].fields
),
1,
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_separatedatabaseandstate", editor, project_state, new_state
)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_separatedatabaseandstate", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["database_operations", "state_operations"]
)
def test_separate_database_and_state2(self):
"""
A complex SeparateDatabaseAndState operation: Multiple operations both
for state and database. Verify the state dependencies within each list
and that state ops don't affect the database.
"""
app_label = "test_separatedatabaseandstate2"
project_state = self.set_up_test_model(app_label)
# Create the operation
database_operations = [
migrations.CreateModel(
"ILovePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveponies"},
),
migrations.CreateModel(
"ILoveMorePonies",
# We use IntegerField and not AutoField because
# the model is going to be deleted immediately
# and with an AutoField this fails on Oracle
[("id", models.IntegerField(primary_key=True))],
options={"db_table": "ilovemoreponies"},
),
migrations.DeleteModel("ILoveMorePonies"),
migrations.CreateModel(
"ILoveEvenMorePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveevenmoreponies"},
),
]
state_operations = [
migrations.CreateModel(
"SomethingElse",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingelse"},
),
migrations.DeleteModel("SomethingElse"),
migrations.CreateModel(
"SomethingCompletelyDifferent",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingcompletelydifferent"},
),
]
operation = migrations.SeparateDatabaseAndState(
state_operations=state_operations,
database_operations=database_operations,
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
def assertModelsAndTables(after_db):
# Tables and models exist, or don't, as they should:
self.assertNotIn((app_label, "somethingelse"), new_state.models)
self.assertEqual(
len(new_state.models[app_label, "somethingcompletelydifferent"].fields),
1,
)
self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
self.assertTableNotExists("somethingelse")
self.assertTableNotExists("somethingcompletelydifferent")
self.assertTableNotExists("ilovemoreponies")
if after_db:
self.assertTableExists("iloveponies")
self.assertTableExists("iloveevenmoreponies")
else:
self.assertTableNotExists("iloveponies")
self.assertTableNotExists("iloveevenmoreponies")
assertModelsAndTables(after_db=False)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertModelsAndTables(after_db=True)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertModelsAndTables(after_db=False)
class SwappableOperationTests(OperationTestBase):
"""
Key operations ignore swappable models
(we don't want to replicate all of them here, as the functionality
is in a common base class anyway)
"""
available_apps = ["migrations"]
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
The CreateTable operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crigsw", editor, new_state, project_state
)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
Tests the DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_adfligsw", editor, project_state, new_state
)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adfligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_adfligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_indexes_ignore_swapped(self):
"""
Add/RemoveIndex operations ignore swapped models.
"""
operation = migrations.AddIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_adinigsw", operation)
with connection.schema_editor() as editor:
# No database queries should be run for swapped models
operation.database_forwards(
"test_adinigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_adinigsw", editor, new_state, project_state
)
operation = migrations.RemoveIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_rminigsw", operation)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_rminigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_rminigsw", editor, new_state, project_state
)
class TestCreateModel(SimpleTestCase):
def test_references_model_mixin(self):
migrations.CreateModel(
"name",
fields=[],
bases=(Mixin, models.Model),
).references_model("other_model", "migrations")
class FieldOperationTests(SimpleTestCase):
def test_references_model(self):
operation = FieldOperation(
"MoDel", "field", models.ForeignKey("Other", models.CASCADE)
)
# Model name match.
self.assertIs(operation.references_model("mOdEl", "migrations"), True)
# Referenced field.
self.assertIs(operation.references_model("oTher", "migrations"), True)
# Doesn't reference.
self.assertIs(operation.references_model("Whatever", "migrations"), False)
def test_references_field_by_name(self):
operation = FieldOperation("MoDel", "field", models.BooleanField(default=False))
self.assertIs(operation.references_field("model", "field", "migrations"), True)
def test_references_field_by_remote_field_model(self):
operation = FieldOperation(
"Model", "field", models.ForeignKey("Other", models.CASCADE)
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_from_fields(self):
operation = FieldOperation(
"Model",
"field",
models.fields.related.ForeignObject(
"Other", models.CASCADE, ["from"], ["to"]
),
)
self.assertIs(operation.references_field("Model", "from", "migrations"), True)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
self.assertIs(operation.references_field("Other", "from", "migrations"), False)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
def test_references_field_by_to_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ForeignKey("Other", models.CASCADE, to_field="field"),
)
self.assertIs(operation.references_field("Other", "field", "migrations"), True)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_through(self):
operation = FieldOperation(
"Model", "field", models.ManyToManyField("Other", through="Through")
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_reference_field_by_through_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ManyToManyField(
"Other", through="Through", through_fields=("first", "second")
),
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Through", "first", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "second", "migrations"), True
)
import functools
import re
from unittest import mock
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.core.validators import RegexValidator, validate_slug
from django.db import connection, migrations, models
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ModelState, ProjectState
from django.test import SimpleTestCase, TestCase, ignore_warnings, override_settings
from django.test.utils import isolate_lru_cache
from django.utils.deprecation import RemovedInDjango51Warning
from .models import FoodManager, FoodQuerySet
class DeconstructibleObject:
"""
A custom deconstructible object.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def deconstruct(self):
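        # Deconstructs to the standard (path, args, kwargs) triple, e.g.
        # DeconstructibleObject(1, a=2) becomes
        # ("<module>.DeconstructibleObject", (1,), {"a": 2}).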
        return (
            self.__module__ + "." + self.__class__.__name__,
            self.args,
            self.kwargs,
        )
class BaseAutodetectorTests(TestCase):
def repr_changes(self, changes, include_dependencies=False):
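        """Return an indented, human-readable summary of detected changes."""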
output = ""
for app_label, migrations_ in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations_:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
if include_dependencies:
output += " Dependencies:\n"
if migration.dependencies:
for dep in migration.dependencies:
output += " %s\n" % (dep,)
else:
output += " None\n"
return output
def assertNumberMigrations(self, changes, app_label, number):
if len(changes.get(app_label, [])) != number:
self.fail(
"Incorrect number of migrations (%s) for %s (expected %s)\n%s"
% (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
)
)
def assertMigrationDependencies(self, changes, app_label, position, dependencies):
if not changes.get(app_label):
self.fail(
"No migrations found for %s\n%s"
% (app_label, self.repr_changes(changes))
)
if len(changes[app_label]) < position + 1:
self.fail(
"No migration at index %s for %s\n%s"
% (position, app_label, self.repr_changes(changes))
)
migration = changes[app_label][position]
if set(migration.dependencies) != set(dependencies):
self.fail(
"Migration dependencies mismatch for %s.%s (expected %s):\n%s"
% (
app_label,
migration.name,
dependencies,
self.repr_changes(changes, include_dependencies=True),
)
)
def assertOperationTypes(self, changes, app_label, position, types):
if not changes.get(app_label):
self.fail(
"No migrations found for %s\n%s"
% (app_label, self.repr_changes(changes))
)
if len(changes[app_label]) < position + 1:
self.fail(
"No migration at index %s for %s\n%s"
% (position, app_label, self.repr_changes(changes))
)
migration = changes[app_label][position]
real_types = [
operation.__class__.__name__ for operation in migration.operations
]
if types != real_types:
self.fail(
"Operation type mismatch for %s.%s (expected %s):\n%s"
% (
app_label,
migration.name,
types,
self.repr_changes(changes),
)
)
def assertOperationAttributes(
self, changes, app_label, position, operation_position, **attrs
):
if not changes.get(app_label):
self.fail(
"No migrations found for %s\n%s"
% (app_label, self.repr_changes(changes))
)
if len(changes[app_label]) < position + 1:
self.fail(
"No migration at index %s for %s\n%s"
% (position, app_label, self.repr_changes(changes))
)
migration = changes[app_label][position]
if len(changes[app_label]) < position + 1:
self.fail(
"No operation at index %s for %s.%s\n%s"
% (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
)
)
operation = migration.operations[operation_position]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail(
"Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s"
% (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
)
)
def assertOperationFieldAttributes(
self, changes, app_label, position, operation_position, **attrs
):
if not changes.get(app_label):
self.fail(
"No migrations found for %s\n%s"
% (app_label, self.repr_changes(changes))
)
if len(changes[app_label]) < position + 1:
self.fail(
"No migration at index %s for %s\n%s"
% (position, app_label, self.repr_changes(changes))
)
migration = changes[app_label][position]
if len(changes[app_label]) < position + 1:
self.fail(
"No operation at index %s for %s.%s\n%s"
% (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
)
)
operation = migration.operations[operation_position]
if not hasattr(operation, "field"):
self.fail(
"No field attribute for %s.%s op #%s."
% (
app_label,
migration.name,
operation_position,
)
)
field = operation.field
for attr, value in attrs.items():
if getattr(field, attr, None) != value:
self.fail(
"Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, "
"got %r):\n%s"
% (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(field, attr, None),
self.repr_changes(changes),
)
)
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model(model_state.clone())
return project_state
def get_changes(self, before_states, after_states, questioner=None):
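        """
        Run the autodetector between two sets of model states (bare lists
        are wrapped in ProjectStates) and return the changes per app label.
        """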
if not isinstance(before_states, ProjectState):
before_states = self.make_project_state(before_states)
if not isinstance(after_states, ProjectState):
after_states = self.make_project_state(after_states)
return MigrationAutodetector(
before_states,
after_states,
questioner,
)._detect_changes()
class AutodetectorTests(BaseAutodetectorTests):
"""
Tests the migration autodetector.
"""
author_empty = ModelState(
"testapp", "Author", [("id", models.AutoField(primary_key=True))]
)
author_name = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
)
author_name_null = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, null=True)),
],
)
author_name_longer = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=400)),
],
)
author_name_renamed = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("names", models.CharField(max_length=200)),
],
)
author_name_default = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default="Ada Lovelace")),
],
)
author_name_check_constraint = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
{
"constraints": [
models.CheckConstraint(
check=models.Q(name__contains="Bob"), name="name_contains_bob"
)
]
},
)
author_dates_of_birth_auto_now = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now=True)),
("date_time_of_birth", models.DateTimeField(auto_now=True)),
("time_of_birth", models.TimeField(auto_now=True)),
],
)
author_dates_of_birth_auto_now_add = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now_add=True)),
("date_time_of_birth", models.DateTimeField(auto_now_add=True)),
("time_of_birth", models.TimeField(auto_now_add=True)),
],
)
author_name_deconstructible_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
],
)
author_name_deconstructible_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
],
)
author_name_deconstructible_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
],
)
author_name_deconstructible_4 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
],
)
author_name_deconstructible_list_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 123]
),
),
],
)
author_name_deconstructible_list_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 123]
),
),
],
)
author_name_deconstructible_list_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 999]
),
),
],
)
author_name_deconstructible_tuple_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 123)
),
),
],
)
author_name_deconstructible_tuple_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 123)
),
),
],
)
author_name_deconstructible_tuple_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 999)
),
),
],
)
author_name_deconstructible_dict_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 123},
),
),
],
)
author_name_deconstructible_dict_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 123},
),
),
],
)
author_name_deconstructible_dict_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 999},
),
),
],
)
author_name_nested_deconstructible_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_changed_arg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2-changed"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_extra_arg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
None,
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_changed_kwarg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c-changed")),
),
),
),
],
)
author_name_nested_deconstructible_extra_kwarg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
c=None,
),
),
),
],
)
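    # Each of the six nested variants above changes exactly one argument in
    # the deconstructed default. The autodetector compares fields via
    # deep_deconstruct(), so only the *_changed_* and *_extra_* states are
    # expected to differ from the two identical baselines.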
author_custom_pk = ModelState(
"testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))]
)
author_with_biography_non_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField()),
("biography", models.TextField()),
],
)
author_with_biography_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(blank=True)),
("biography", models.TextField(blank=True)),
],
)
author_with_book = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
author_with_book_order_wrt = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={"order_with_respect_to": "book"},
)
author_renamed_with_book = ModelState(
"testapp",
"Writer",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
author_with_publisher_string = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher_name", models.CharField(max_length=200)),
],
)
author_with_publisher = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
author_with_user = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("auth.User", models.CASCADE)),
],
)
author_with_custom_user = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)),
],
)
author_proxy = ModelState(
"testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",)
)
author_proxy_options = ModelState(
"testapp",
"AuthorProxy",
[],
{
"proxy": True,
"verbose_name": "Super Author",
},
("testapp.author",),
)
author_proxy_notproxy = ModelState(
"testapp", "AuthorProxy", [], {}, ("testapp.author",)
)
author_proxy_third = ModelState(
"thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",)
)
author_proxy_third_notproxy = ModelState(
"thirdapp", "AuthorProxy", [], {}, ("testapp.author",)
)
author_proxy_proxy = ModelState(
"testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy",)
)
author_unmanaged = ModelState(
"testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author",)
)
author_unmanaged_managed = ModelState(
"testapp", "AuthorUnmanaged", [], {}, ("testapp.author",)
)
author_unmanaged_default_pk = ModelState(
"testapp", "Author", [("id", models.AutoField(primary_key=True))]
)
author_unmanaged_custom_pk = ModelState(
"testapp",
"Author",
[
("pk_field", models.IntegerField(primary_key=True)),
],
)
author_with_m2m = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
],
)
author_with_m2m_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
],
)
author_with_m2m_through = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"publishers",
models.ManyToManyField("testapp.Publisher", through="testapp.Contract"),
),
],
)
author_with_renamed_m2m_through = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"publishers",
models.ManyToManyField("testapp.Publisher", through="testapp.Deal"),
),
],
)
author_with_former_m2m = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.CharField(max_length=100)),
],
)
author_with_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{
"permissions": [("can_hire", "Can hire")],
"verbose_name": "Authi",
},
)
author_with_db_table_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_one"},
)
author_with_new_db_table_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_two"},
)
author_renamed_with_db_table_options = ModelState(
"testapp",
"NewAuthor",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_one"},
)
author_renamed_with_new_db_table_options = ModelState(
"testapp",
"NewAuthor",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_three"},
)
contract = ModelState(
"testapp",
"Contract",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
contract_renamed = ModelState(
"testapp",
"Deal",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
publisher = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_author = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_aardvark_author = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_book = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Book", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
other_pony = ModelState(
"otherapp",
"Pony",
[
("id", models.AutoField(primary_key=True)),
],
)
other_pony_food = ModelState(
"otherapp",
"Pony",
[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
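    # Managers are part of model state: test_new_model below checks that all
    # three custom managers survive into the CreateModel operation in
    # declaration order.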
other_stable = ModelState(
"otherapp", "Stable", [("id", models.AutoField(primary_key=True))]
)
third_thing = ModelState(
"thirdapp", "Thing", [("id", models.AutoField(primary_key=True))]
)
book = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_proxy_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_proxy_proxy_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.AAuthorProxyProxy", models.CASCADE)),
],
)
book_migrations_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.UnmigratedModel", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_no_author_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.IntegerField()),
("title", models.CharField(max_length=200)),
],
)
book_with_no_author = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
],
)
book_with_author_renamed = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_field_and_author_renamed = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("writer", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_multiple_authors = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
],
)
book_with_multiple_authors_through_attribution = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
(
"authors",
models.ManyToManyField(
"testapp.Author", through="otherapp.Attribution"
),
),
("title", models.CharField(max_length=200)),
],
)
book_indexes = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(fields=["author", "title"], name="book_title_author_idx")
],
},
)
book_unordered_indexes = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(fields=["title", "author"], name="book_author_title_idx")
],
},
)
book_unique_together = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("author", "title")},
},
)
book_unique_together_2 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "author")},
},
)
book_unique_together_3 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "newfield")},
},
)
book_unique_together_4 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "newfield2")},
},
)
attribution = ModelState(
"otherapp",
"Attribution",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
edition = ModelState(
"thirdapp",
"Edition",
[
("id", models.AutoField(primary_key=True)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
custom_user = ModelState(
"thirdapp",
"CustomUser",
[
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
],
bases=(AbstractBaseUser,),
)
custom_user_no_inherit = ModelState(
"thirdapp",
"CustomUser",
[
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
],
)
aardvark = ModelState(
"thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))]
)
aardvark_testapp = ModelState(
"testapp", "Aardvark", [("id", models.AutoField(primary_key=True))]
)
aardvark_based_on_author = ModelState(
"testapp", "Aardvark", [], bases=("testapp.Author",)
)
aardvark_pk_fk_author = ModelState(
"testapp",
"Aardvark",
[
(
"id",
models.OneToOneField(
"testapp.Author", models.CASCADE, primary_key=True
),
),
],
)
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState(
"eggs",
"Rabbit",
[
("id", models.AutoField(primary_key=True)),
("knight", models.ForeignKey("eggs.Knight", models.CASCADE)),
("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)),
],
{
"unique_together": {("parent", "knight")},
"indexes": [
models.Index(
fields=["parent", "knight"], name="rabbit_circular_fk_index"
)
],
},
)
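    # None of the ModelState fixtures above are installed as real models; the
    # autodetector only sees them via ProjectState. A minimal sketch of the
    # comparison that get_changes() performs in every test below:
    #
    #   before = self.make_project_state([self.author_empty])
    #   after = self.make_project_state([self.author_name])
    #   autodetector = MigrationAutodetector(before, after)
    #   changes = autodetector._detect_changes()   # {app_label: [Migration]}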
def test_arrange_for_graph(self):
"""Tests auto-naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("testapp", "0001_initial"),
)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("otherapp", "0001_initial"),
)
# Use project state to make a new migration change set
before = self.make_project_state([self.publisher, self.other_pony])
after = self.make_project_state(
[
self.author_empty,
self.publisher,
self.other_pony,
self.other_stable,
]
)
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(
changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]
)
self.assertEqual(changes["otherapp"][0].name, "0002_stable")
self.assertEqual(
changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]
)
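    # arrange_for_graph() renames the placeholder migrations so they continue
    # the numbering already in the graph. Roughly, for the "testapp" leaf
    # "0002_foobar" above:
    #
    #   next_number = 2 + 1                          # highest existing + 1
    #   name = "%04i_%s" % (next_number, "author")   # -> "0003_author"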
def test_arrange_for_graph_with_multiple_initial(self):
# Make a fake graph.
graph = MigrationGraph()
# Use project state to make a new migration change set.
before = self.make_project_state([])
after = self.make_project_state(
[self.author_with_book, self.book, self.attribution]
)
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_initial": True})
)
changes = autodetector._detect_changes()
changes = autodetector.arrange_for_graph(changes, graph)
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].dependencies, [])
self.assertEqual(changes["otherapp"][1].name, "0002_initial")
self.assertCountEqual(
changes["otherapp"][1].dependencies,
[("testapp", "0001_initial"), ("otherapp", "0001_initial")],
)
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(
changes["testapp"][0].dependencies, [("otherapp", "0001_initial")]
)
def test_trim_apps(self):
"""
Trim does not remove dependencies but does remove unwanted apps.
"""
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state(
[self.author_empty, self.other_pony, self.other_stable, self.third_thing]
)
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_initial": True})
)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"""Tests custom naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("testapp", "0001_initial"),
)
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state(
[self.author_empty, self.other_pony, self.other_stable]
)
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = "custom_name"
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(
changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]
)
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(
changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]
)
def test_new_model(self):
"""Tests autodetection of new models."""
changes = self.get_changes([], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual(
[name for name, mgr in changes["otherapp"][0].operations[0].managers],
["food_qs", "food_mgr", "food_mgr_kwargs"],
)
def test_old_model(self):
"""Tests deletion of old models."""
changes = self.get_changes([self.author_empty], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_empty], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_date_fields_with_auto_now_not_asking_for_default(
self, mocked_ask_method
):
changes = self.get_changes(
[self.author_empty], [self.author_dates_of_birth_auto_now]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AddField", "AddField", "AddField"]
)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now=True)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_date_fields_with_auto_now_add_not_asking_for_null_addition(
self, mocked_ask_method
):
changes = self.get_changes(
[self.author_empty], [self.author_dates_of_birth_auto_now_add]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AddField", "AddField", "AddField"]
)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_auto_now_add_addition"
)
def test_add_date_fields_with_auto_now_add_asking_for_default(
self, mocked_ask_method
):
changes = self.get_changes(
[self.author_empty], [self.author_dates_of_birth_auto_now_add]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AddField", "AddField", "AddField"]
)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
self.assertEqual(mocked_ask_method.call_count, 3)
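    # The asking/not-asking split in the three tests above mirrors the
    # questioner contract: auto_now fields can always compute a value, while
    # auto_now_add needs a one-off value for existing rows, so the questioner
    # is invoked once per added field (hence call_count == 3):
    #
    #   questioner.ask_auto_now_add_addition("date_of_birth", "Author")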
def test_remove_field(self):
"""Tests autodetection of removed fields."""
changes = self.get_changes([self.author_name], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_name], [self.author_name_longer])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
def test_supports_functools_partial(self):
def _content_file_name(instance, filename, key, **kwargs):
return "{}/{}".format(instance, filename)
def content_file_name(key, **kwargs):
return functools.partial(_content_file_name, key, **kwargs)
# An unchanged partial reference.
before = [
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"file",
models.FileField(
max_length=200, upload_to=content_file_name("file")
),
),
],
)
]
after = [
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"file",
models.FileField(
max_length=200, upload_to=content_file_name("file")
),
),
],
)
]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, "testapp", 0)
# A changed partial reference.
args_changed = [
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"file",
models.FileField(
max_length=200, upload_to=content_file_name("other-file")
),
),
],
)
]
changes = self.get_changes(before, args_changed)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
# Can't use assertOperationFieldAttributes because we need the
# deconstructed version, i.e., the exploded func/args/keywords rather
# than the partial: we don't care if it's not the same instance of the
# partial, only if it's the same source function, args, and keywords.
value = changes["testapp"][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ("other-file",), {}),
(value.func, value.args, value.keywords),
)
kwargs_changed = [
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"file",
models.FileField(
max_length=200,
upload_to=content_file_name("file", spam="eggs"),
),
),
],
)
]
changes = self.get_changes(before, kwargs_changed)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
value = changes["testapp"][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ("file",), {"spam": "eggs"}),
(value.func, value.args, value.keywords),
)
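    # functools.partial doesn't define __eq__, so two partials wrapping the
    # same function compare unequal; the autodetector (and the assertions
    # above) compare the exploded attributes instead:
    #
    #   p1 = functools.partial(_content_file_name, "file")
    #   p2 = functools.partial(_content_file_name, "file")
    #   assert p1 != p2                              # identity comparison
    #   assert (p1.func, p1.args, p1.keywords) == (p2.func, p2.args, p2.keywords)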
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_alter_field_to_not_null_with_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
self.assertOperationFieldAttributes(
changes, "testapp", 0, 0, default="Ada Lovelace"
)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
return_value=models.NOT_PROVIDED,
)
def test_alter_field_to_not_null_without_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
self.assertOperationFieldAttributes(
changes, "testapp", 0, 0, default=models.NOT_PROVIDED
)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
return_value="Some Name",
)
def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=False
)
self.assertOperationFieldAttributes(
changes, "testapp", 0, 0, default="Some Name"
)
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
changes = self.get_changes(
[self.author_name],
[self.author_name_renamed],
MigrationQuestioner({"ask_rename": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="name", new_name="names"
)
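    # Rename detection is opt-in: without {"ask_rename": True} the base
    # questioner answers "no" and the same change is detected as an AddField
    # of "names" plus a RemoveField of "name" instead.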
def test_rename_field_foreign_key_to_field(self):
before = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(unique=True)),
],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
(
"foo",
models.ForeignKey("app.Foo", models.CASCADE, to_field="field"),
),
],
),
]
after = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("renamed_field", models.IntegerField(unique=True)),
],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
(
"foo",
models.ForeignKey(
"app.Foo", models.CASCADE, to_field="renamed_field"
),
),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RenameField"])
self.assertOperationAttributes(
changes, "app", 0, 0, old_name="field", new_name="renamed_field"
)
def test_rename_foreign_object_fields(self):
fields = ("first", "second")
renamed_fields = ("first_renamed", "second_renamed")
before = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("first", models.IntegerField()),
("second", models.IntegerField()),
],
options={"unique_together": {fields}},
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("first", models.IntegerField()),
("second", models.IntegerField()),
(
"foo",
models.ForeignObject(
"app.Foo",
models.CASCADE,
from_fields=fields,
to_fields=fields,
),
),
],
),
]
# Case 1: to_fields renames.
after = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("first_renamed", models.IntegerField()),
("second_renamed", models.IntegerField()),
],
options={"unique_together": {renamed_fields}},
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("first", models.IntegerField()),
("second", models.IntegerField()),
(
"foo",
models.ForeignObject(
"app.Foo",
models.CASCADE,
from_fields=fields,
to_fields=renamed_fields,
),
),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(
changes, "app", 0, ["RenameField", "RenameField", "AlterUniqueTogether"]
)
self.assertOperationAttributes(
changes,
"app",
0,
0,
model_name="foo",
old_name="first",
new_name="first_renamed",
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
model_name="foo",
old_name="second",
new_name="second_renamed",
)
# Case 2: from_fields renames.
after = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("first", models.IntegerField()),
("second", models.IntegerField()),
],
options={"unique_together": {fields}},
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("first_renamed", models.IntegerField()),
("second_renamed", models.IntegerField()),
(
"foo",
models.ForeignObject(
"app.Foo",
models.CASCADE,
from_fields=renamed_fields,
to_fields=fields,
),
),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RenameField", "RenameField"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
model_name="bar",
old_name="first",
new_name="first_renamed",
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
model_name="bar",
old_name="second",
new_name="second_renamed",
)
def test_rename_referenced_primary_key(self):
before = [
ModelState(
"app",
"Foo",
[
("id", models.CharField(primary_key=True, serialize=False)),
],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("foo", models.ForeignKey("app.Foo", models.CASCADE)),
],
),
]
after = [
ModelState(
"app",
"Foo",
[("renamed_id", models.CharField(primary_key=True, serialize=False))],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("foo", models.ForeignKey("app.Foo", models.CASCADE)),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RenameField"])
self.assertOperationAttributes(
changes, "app", 0, 0, old_name="id", new_name="renamed_id"
)
def test_rename_field_preserved_db_column(self):
"""
        RenameField is used if a field is renamed and a db_column equal to
        the old field's column is added.
"""
before = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField()),
],
),
]
after = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
("renamed_field", models.IntegerField(db_column="field")),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
model_name="foo",
name="field",
)
self.assertEqual(
changes["app"][0].operations[0].field.deconstruct(),
(
"field",
"django.db.models.IntegerField",
[],
{"db_column": "field"},
),
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
model_name="foo",
old_name="field",
new_name="renamed_field",
)
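    # The AlterField above pins the column name before the rename, so the
    # table itself never changes: "renamed_field" still maps to the original
    # "field" column via db_column.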
def test_rename_related_field_preserved_db_column(self):
before = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
("foo", models.ForeignKey("app.Foo", models.CASCADE)),
],
),
]
after = [
ModelState(
"app",
"Foo",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"app",
"Bar",
[
("id", models.AutoField(primary_key=True)),
(
"renamed_foo",
models.ForeignKey(
"app.Foo", models.CASCADE, db_column="foo_id"
),
),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename": True})
)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
model_name="bar",
name="foo",
)
self.assertEqual(
changes["app"][0].operations[0].field.deconstruct(),
(
"foo",
"django.db.models.ForeignKey",
[],
{"to": "app.foo", "on_delete": models.CASCADE, "db_column": "foo_id"},
),
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
model_name="bar",
old_name="foo",
new_name="renamed_foo",
)
def test_rename_field_with_renamed_model(self):
changes = self.get_changes(
[self.author_name],
[
ModelState(
"testapp",
"RenamedAuthor",
[
("id", models.AutoField(primary_key=True)),
("renamed_name", models.CharField(max_length=200)),
],
),
],
MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}),
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel", "RenameField"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
old_name="Author",
new_name="RenamedAuthor",
)
self.assertOperationAttributes(
changes,
"testapp",
0,
1,
old_name="name",
new_name="renamed_name",
)
def test_rename_model(self):
"""Tests autodetection of renamed models."""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_author_renamed],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Author", new_name="Writer"
)
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, "otherapp", 0)
def test_rename_model_case(self):
"""
        Model names are compared case-insensitively. Changing only the case
        doesn't lead to any autodetected operations.
"""
author_renamed = ModelState(
"testapp",
"author",
[
("id", models.AutoField(primary_key=True)),
],
)
changes = self.get_changes(
[self.author_empty, self.book],
[author_renamed, self.book],
questioner=MigrationQuestioner({"ask_rename_model": True}),
)
self.assertNumberMigrations(changes, "testapp", 0)
self.assertNumberMigrations(changes, "otherapp", 0)
def test_renamed_referenced_m2m_model_case(self):
publisher_renamed = ModelState(
"testapp",
"publisher",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
)
changes = self.get_changes(
[self.publisher, self.author_with_m2m],
[publisher_renamed, self.author_with_m2m],
questioner=MigrationQuestioner({"ask_rename_model": True}),
)
self.assertNumberMigrations(changes, "testapp", 0)
self.assertNumberMigrations(changes, "otherapp", 0)
def test_rename_m2m_through_model(self):
"""
Tests autodetection of renamed models that are used in M2M relations as
through models.
"""
changes = self.get_changes(
[self.author_with_m2m_through, self.publisher, self.contract],
[
self.author_with_renamed_m2m_through,
self.publisher,
self.contract_renamed,
],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Contract", new_name="Deal"
)
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_field_and_author_renamed],
MigrationQuestioner({"ask_rename": True, "ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Author", new_name="Writer"
)
# Right number/type of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, old_name="author", new_name="writer"
)
def test_rename_model_with_fks_in_different_position(self):
"""
#24537 - The order of fields in a model does not influence
the RenameModel detection.
"""
before = [
ModelState(
"testapp",
"EntityA",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"testapp",
"EntityB",
[
("id", models.AutoField(primary_key=True)),
("some_label", models.CharField(max_length=255)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
],
),
]
after = [
ModelState(
"testapp",
"EntityA",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"testapp",
"RenamedEntityB",
[
("id", models.AutoField(primary_key=True)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
("some_label", models.CharField(max_length=255)),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB"
)
def test_rename_model_reverse_relation_dependencies(self):
"""
The migration to rename a model pointed to by a foreign key in another
app must run after the other app's migration that adds the foreign key
        with the model's original name. Therefore, the renaming migration has a
dependency on that other migration.
"""
before = [
ModelState(
"testapp",
"EntityA",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"otherapp",
"EntityB",
[
("id", models.AutoField(primary_key=True)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
],
),
]
after = [
ModelState(
"testapp",
"RenamedEntityA",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"otherapp",
"EntityB",
[
("id", models.AutoField(primary_key=True)),
(
"entity_a",
models.ForeignKey("testapp.RenamedEntityA", models.CASCADE),
),
],
),
]
changes = self.get_changes(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertMigrationDependencies(
changes, "testapp", 0, [("otherapp", "__first__")]
)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="EntityA", new_name="RenamedEntityA"
)
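    # ("otherapp", "__first__") is a symbolic dependency; the graph resolves
    # it to whatever the first "otherapp" migration ends up being named.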
def test_fk_dependency(self):
"""Having a ForeignKey automatically adds a dependency."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
changes = self.get_changes([], [self.author_name, self.book, self.edition])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertMigrationDependencies(changes, "testapp", 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("testapp", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="Edition")
self.assertMigrationDependencies(
changes, "thirdapp", 0, [("otherapp", "auto_1")]
)
def test_proxy_fk_dependency(self):
"""FK dependencies still work on proxy models."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
changes = self.get_changes(
[], [self.author_empty, self.author_proxy_third, self.book_proxy_fk]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertMigrationDependencies(changes, "testapp", 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("thirdapp", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="AuthorProxy")
self.assertMigrationDependencies(
changes, "thirdapp", 0, [("testapp", "auto_1")]
)
def test_same_app_no_fk_dependency(self):
"""
        A migration with a FK between two models of the same app
        does not have a dependency on itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author")
self.assertMigrationDependencies(changes, "testapp", 0, [])
def test_circular_fk_dependency(self):
"""
        A circular ForeignKey dependency is automatically resolved into
        two migrations on one side and one on the other.
"""
changes = self.get_changes(
[], [self.author_with_book, self.book, self.publisher_with_book]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author")
self.assertMigrationDependencies(
changes, "testapp", 0, [("otherapp", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 2)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationTypes(changes, "otherapp", 1, ["AddField"])
self.assertMigrationDependencies(changes, "otherapp", 0, [])
self.assertMigrationDependencies(
changes, "otherapp", 1, [("otherapp", "auto_1"), ("testapp", "auto_1")]
)
# both split migrations should be `initial`
self.assertTrue(changes["otherapp"][0].initial)
self.assertTrue(changes["otherapp"][1].initial)
def test_same_app_circular_fk_dependency(self):
"""
        A migration with a FK between two models of the same app does
        not have a dependency on itself.
"""
changes = self.get_changes(
[], [self.author_with_publisher, self.publisher_with_author]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "CreateModel", "AddField"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, "testapp", 0, [])
def test_same_app_circular_fk_dependency_with_unique_together_and_indexes(self):
"""
        #22275 - A migration with a circular FK dependency does not try to
        create the unique_together constraint and indexes until all required
        fields have been created.
"""
changes = self.get_changes([], [self.knight, self.rabbit])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "eggs", 1)
self.assertOperationTypes(
changes,
"eggs",
0,
["CreateModel", "CreateModel", "AddIndex", "AlterUniqueTogether"],
)
self.assertNotIn("unique_together", changes["eggs"][0].operations[0].options)
self.assertNotIn("unique_together", changes["eggs"][0].operations[1].options)
self.assertMigrationDependencies(changes, "eggs", 0, [])
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options."""
changes = self.get_changes(
[self.author_empty], [self.author_with_db_table_options]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", table="author_one"
)
def test_alter_db_table_change(self):
"""Tests detection for changing db_table in model's options'."""
changes = self.get_changes(
[self.author_with_db_table_options], [self.author_with_new_db_table_options]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", table="author_two"
)
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options."""
changes = self.get_changes(
[self.author_with_db_table_options], [self.author_empty]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", table=None
)
def test_alter_db_table_no_changes(self):
"""
        AlterModelTable isn't generated if the db_table option hasn't changed.
"""
changes = self.get_changes(
[self.author_with_db_table_options], [self.author_with_db_table_options]
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_keep_db_table_with_model_change(self):
"""
        When the model is renamed but db_table stays as-is, the autodetector
        must not create more than one operation.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor"
)
def test_alter_db_table_with_model_change(self):
"""
        When both the model and its db_table change, the autodetector must
        create two operations.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_new_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["RenameModel", "AlterModelTable"]
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor"
)
self.assertOperationAttributes(
changes, "testapp", 0, 1, name="newauthor", table="author_three"
)
def test_identical_regex_doesnt_alter(self):
from_state = ModelState(
"testapp",
"model",
[
(
"id",
models.AutoField(
primary_key=True,
validators=[
RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, "
"underscores or hyphens.",
"invalid",
)
],
),
)
],
)
to_state = ModelState(
"testapp",
"model",
[("id", models.AutoField(primary_key=True, validators=[validate_slug]))],
)
changes = self.get_changes([from_state], [to_state])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 0)
def test_different_regex_does_alter(self):
from_state = ModelState(
"testapp",
"model",
[
(
"id",
models.AutoField(
primary_key=True,
validators=[
RegexValidator(
re.compile("^[a-z]+\\Z", 32),
"Enter a valid “slug” consisting of letters, numbers, "
"underscores or hyphens.",
"invalid",
)
],
),
)
],
)
to_state = ModelState(
"testapp",
"model",
[("id", models.AutoField(primary_key=True, validators=[validate_slug]))],
)
changes = self.get_changes([from_state], [to_state])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
def test_alter_regex_string_to_compiled_regex(self):
regex_string = "^[a-z]+$"
from_state = ModelState(
"testapp",
"model",
[
(
"id",
models.AutoField(
primary_key=True, validators=[RegexValidator(regex_string)]
),
)
],
)
to_state = ModelState(
"testapp",
"model",
[
(
"id",
models.AutoField(
primary_key=True,
validators=[RegexValidator(re.compile(regex_string))],
),
)
],
)
changes = self.get_changes([from_state], [to_state])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
def test_empty_unique_together(self):
"""Empty unique_together shouldn't generate a migration."""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
model_state_not_specified = ModelState(
"a", "model", [("id", models.AutoField(primary_key=True))]
)
# Explicitly testing for None, since this was the issue in #23452 after
# an AlterUniqueTogether operation with e.g. () as value
model_state_none = ModelState(
"a",
"model",
[("id", models.AutoField(primary_key=True))],
{
"unique_together": None,
},
)
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState(
"a",
"model",
[("id", models.AutoField(primary_key=True))],
{
"unique_together": set(),
},
)
def test(from_state, to_state, msg):
changes = self.get_changes([from_state], [to_state])
if changes:
ops = ", ".join(
o.__class__.__name__ for o in changes["a"][0].operations
)
self.fail("Created operation(s) %s from %s" % (ops, msg))
tests = (
(
model_state_not_specified,
model_state_not_specified,
'"not specified" to "not specified"',
),
(model_state_not_specified, model_state_none, '"not specified" to "None"'),
(
model_state_not_specified,
model_state_empty,
'"not specified" to "empty"',
),
(model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(
model_state_empty,
model_state_not_specified,
'"empty" to "not specified"',
),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
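    # All nine transitions above are "no change" because the autodetector
    # coerces unique_together to a set of tuples before comparing, which
    # maps "not specified", None, and set() to the same empty set.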
def test_create_model_with_indexes(self):
"""Test creation of new model with indexes already defined."""
author = ModelState(
"otherapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(fields=["name"], name="create_model_with_indexes_idx")
]
},
)
changes = self.get_changes([], [author])
added_index = models.Index(
fields=["name"], name="create_model_with_indexes_idx"
)
# Right number of migrations?
self.assertEqual(len(changes["otherapp"]), 1)
# Right number of actions?
migration = changes["otherapp"][0]
self.assertEqual(len(migration.operations), 2)
# Right actions order?
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel", "AddIndex"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Author")
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="author", index=added_index
)
def test_add_indexes(self):
"""Test change detection of new indexes."""
changes = self.get_changes(
[self.author_empty, self.book], [self.author_empty, self.book_indexes]
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddIndex"])
added_index = models.Index(
fields=["author", "title"], name="book_title_author_idx"
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, model_name="book", index=added_index
)
def test_remove_indexes(self):
"""Test change detection of removed indexes."""
changes = self.get_changes(
[self.author_empty, self.book_indexes], [self.author_empty, self.book]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx"
)
def test_rename_indexes(self):
book_renamed_indexes = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(
fields=["author", "title"], name="renamed_book_title_author_idx"
)
],
},
)
changes = self.get_changes(
[self.author_empty, self.book_indexes],
[self.author_empty, book_renamed_indexes],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameIndex"])
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
model_name="book",
new_name="renamed_book_title_author_idx",
old_name="book_title_author_idx",
)
def test_order_fields_indexes(self):
"""Test change detection of reordering of fields in indexes."""
changes = self.get_changes(
[self.author_empty, self.book_indexes],
[self.author_empty, self.book_unordered_indexes],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex", "AddIndex"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx"
)
added_index = models.Index(
fields=["title", "author"], name="book_author_title_idx"
)
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="book", index=added_index
)
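    # Reordering the fields also changes the index name here, so this isn't
    # an in-place alteration or a rename: the old index is removed and the
    # new one added.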
def test_create_model_with_check_constraint(self):
"""Test creation of new model with constraints already defined."""
author = ModelState(
"otherapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
{
"constraints": [
models.CheckConstraint(
check=models.Q(name__contains="Bob"), name="name_contains_bob"
)
]
},
)
changes = self.get_changes([], [author])
added_constraint = models.CheckConstraint(
check=models.Q(name__contains="Bob"), name="name_contains_bob"
)
# Right number of migrations?
self.assertEqual(len(changes["otherapp"]), 1)
# Right number of actions?
migration = changes["otherapp"][0]
self.assertEqual(len(migration.operations), 2)
# Right actions order?
self.assertOperationTypes(
changes, "otherapp", 0, ["CreateModel", "AddConstraint"]
)
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Author")
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="author", constraint=added_constraint
)
def test_add_constraints(self):
"""Test change detection of new constraints."""
changes = self.get_changes(
[self.author_name], [self.author_name_check_constraint]
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddConstraint"])
added_constraint = models.CheckConstraint(
check=models.Q(name__contains="Bob"), name="name_contains_bob"
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, model_name="author", constraint=added_constraint
)
def test_remove_constraints(self):
"""Test change detection of removed constraints."""
changes = self.get_changes(
[self.author_name_check_constraint], [self.author_name]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveConstraint"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, model_name="author", name="name_contains_bob"
)
def test_add_unique_together(self):
"""Tests unique_together detection."""
changes = self.get_changes(
[self.author_empty, self.book],
[self.author_empty, self.book_unique_together],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
name="book",
unique_together={("author", "title")},
)
def test_remove_unique_together(self):
"""Tests unique_together detection."""
changes = self.get_changes(
[self.author_empty, self.book_unique_together],
[self.author_empty, self.book],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", unique_together=set()
)
def test_unique_together_remove_fk(self):
"""Tests unique_together and field removal detection & ordering"""
changes = self.get_changes(
[self.author_empty, self.book_unique_together],
[self.author_empty, self.book_with_no_author],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterUniqueTogether", "RemoveField"],
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", unique_together=set()
)
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="book", name="author"
)
def test_unique_together_no_changes(self):
"""
unique_together doesn't generate a migration if no
changes have been made.
"""
changes = self.get_changes(
[self.author_empty, self.book_unique_together],
[self.author_empty, self.book_unique_together],
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_unique_together_ordering(self):
"""
unique_together also triggers on ordering changes.
"""
changes = self.get_changes(
[self.author_empty, self.book_unique_together],
[self.author_empty, self.book_unique_together_2],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterUniqueTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
name="book",
unique_together={("title", "author")},
)
def test_add_field_and_unique_together(self):
"""
        Added fields are created before being referenced in unique_together.
"""
changes = self.get_changes(
[self.author_empty, self.book],
[self.author_empty, self.book_unique_together_3],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AddField", "AlterUniqueTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
name="book",
unique_together={("title", "newfield")},
)
def test_create_model_and_unique_together(self):
author = ModelState(
"otherapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
)
book_with_author = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "author")},
},
)
changes = self.get_changes(
[self.book_with_no_author], [author, book_with_author]
)
# Right number of migrations?
self.assertEqual(len(changes["otherapp"]), 1)
# Right number of actions?
migration = changes["otherapp"][0]
self.assertEqual(len(migration.operations), 3)
# Right actions order?
self.assertOperationTypes(
changes,
"otherapp",
0,
["CreateModel", "AddField", "AlterUniqueTogether"],
)
def test_remove_field_and_unique_together(self):
"""
        Fields are removed only after unique_together stops referencing them.
"""
changes = self.get_changes(
[self.author_empty, self.book_unique_together_3],
[self.author_empty, self.book_unique_together],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterUniqueTogether", "RemoveField"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
name="book",
unique_together={("author", "title")},
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
model_name="book",
name="newfield",
)
def test_alter_field_and_unique_together(self):
"""Fields are altered after deleting some unique_together."""
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField(db_index=True)),
],
{
"unique_together": {("name",)},
},
)
author_reversed_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, unique=True)),
("age", models.IntegerField()),
],
{
"unique_together": {("age",)},
},
)
changes = self.get_changes([initial_author], [author_reversed_constraints])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
[
"AlterUniqueTogether",
"AlterField",
"AlterField",
"AlterUniqueTogether",
],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
unique_together=set(),
)
self.assertOperationAttributes(
changes,
"testapp",
0,
1,
model_name="author",
name="age",
)
self.assertOperationAttributes(
changes,
"testapp",
0,
2,
model_name="author",
name="name",
)
self.assertOperationAttributes(
changes,
"testapp",
0,
3,
name="author",
unique_together={("age",)},
)
def test_partly_alter_unique_together_increase(self):
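        """
        Adding an entry to an existing unique_together produces a single
        AlterUniqueTogether with the full new set.
        """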
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"unique_together": {("name",)},
},
)
author_new_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"unique_together": {("name",), ("age",)},
},
)
changes = self.get_changes([initial_author], [author_new_constraints])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["AlterUniqueTogether"],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
unique_together={("name",), ("age",)},
)
def test_partly_alter_unique_together_decrease(self):
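        """
        Removing one of several unique_together entries produces a single
        AlterUniqueTogether with the remaining set.
        """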
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"unique_together": {("name",), ("age",)},
},
)
author_new_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"unique_together": {("name",)},
},
)
changes = self.get_changes([initial_author], [author_new_constraints])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["AlterUniqueTogether"],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
unique_together={("name",)},
)
def test_rename_field_and_unique_together(self):
"""Fields are renamed before updating unique_together."""
changes = self.get_changes(
[self.author_empty, self.book_unique_together_3],
[self.author_empty, self.book_unique_together_4],
MigrationQuestioner({"ask_rename": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["RenameField", "AlterUniqueTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
name="book",
unique_together={("title", "newfield2")},
)
def test_proxy(self):
"""The autodetector correctly deals with proxy models."""
# First, we test adding a proxy model
changes = self.get_changes(
[self.author_empty], [self.author_empty, self.author_proxy]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="AuthorProxy",
options={"proxy": True, "indexes": [], "constraints": []},
)
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
changes = self.get_changes(
[self.author_empty, self.author_proxy],
[self.author_empty, self.author_proxy_notproxy],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(
changes, "testapp", 0, 1, name="AuthorProxy", options={}
)
def test_proxy_non_model_parent(self):
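        """
        A proxy model with a non-model base class keeps that base in the
        CreateModel operation's bases.
        """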
class Mixin:
pass
author_proxy_non_model_parent = ModelState(
"testapp",
"AuthorProxy",
[],
{"proxy": True},
(Mixin, "testapp.author"),
)
changes = self.get_changes(
[self.author_empty],
[self.author_empty, author_proxy_non_model_parent],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="AuthorProxy",
options={"proxy": True, "indexes": [], "constraints": []},
bases=(Mixin, "testapp.author"),
)
def test_proxy_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on proxy
models.
"""
# First, we test the default pk field name
changes = self.get_changes(
[], [self.author_empty, self.author_proxy_third, self.book_proxy_fk]
)
        # The model the FK points to.
self.assertEqual(
changes["otherapp"][0].operations[0].fields[2][1].remote_field.model,
"thirdapp.AuthorProxy",
)
# Now, we test the custom pk field name
changes = self.get_changes(
[], [self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk]
)
        # The model the FK points to.
self.assertEqual(
changes["otherapp"][0].operations[0].fields[2][1].remote_field.model,
"thirdapp.AuthorProxy",
)
def test_proxy_to_mti_with_fk_to_proxy(self):
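        """
        An FK to a proxy resolves to the proxy's concrete model; making the
        proxy concrete via MTI triggers an AlterField on the FK and a
        cross-app dependency.
        """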
# First, test the pk table and field name.
to_state = self.make_project_state(
[self.author_empty, self.author_proxy_third, self.book_proxy_fk],
)
changes = self.get_changes([], to_state)
fk_field = changes["otherapp"][0].operations[0].fields[2][1]
self.assertEqual(
to_state.get_concrete_model_key(fk_field.remote_field.model),
("testapp", "author"),
)
self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy")
# Change AuthorProxy to use MTI.
from_state = to_state.clone()
to_state = self.make_project_state(
[self.author_empty, self.author_proxy_third_notproxy, self.book_proxy_fk],
)
changes = self.get_changes(from_state, to_state)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(
changes, "thirdapp", 0, ["DeleteModel", "CreateModel"]
)
# Right number/type of migrations for the Book model with a FK to
# AuthorProxy?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
# otherapp should depend on thirdapp.
self.assertMigrationDependencies(
changes, "otherapp", 0, [("thirdapp", "auto_1")]
)
# Now, test the pk table and field name.
fk_field = changes["otherapp"][0].operations[0].field
self.assertEqual(
to_state.get_concrete_model_key(fk_field.remote_field.model),
("thirdapp", "authorproxy"),
)
self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy")
def test_proxy_to_mti_with_fk_to_proxy_proxy(self):
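        """
        As test_proxy_to_mti_with_fk_to_proxy, but the FK points to a proxy
        of the proxy that is made concrete.
        """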
# First, test the pk table and field name.
to_state = self.make_project_state(
[
self.author_empty,
self.author_proxy,
self.author_proxy_proxy,
self.book_proxy_proxy_fk,
]
)
changes = self.get_changes([], to_state)
fk_field = changes["otherapp"][0].operations[0].fields[1][1]
self.assertEqual(
to_state.get_concrete_model_key(fk_field.remote_field.model),
("testapp", "author"),
)
self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy")
# Change AuthorProxy to use MTI. FK still points to AAuthorProxyProxy,
# a proxy of AuthorProxy.
from_state = to_state.clone()
to_state = self.make_project_state(
[
self.author_empty,
self.author_proxy_notproxy,
self.author_proxy_proxy,
self.book_proxy_proxy_fk,
]
)
changes = self.get_changes(from_state, to_state)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
# Right number/type of migrations for the Book model with a FK to
# AAuthorProxyProxy?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
# otherapp should depend on testapp.
self.assertMigrationDependencies(
changes, "otherapp", 0, [("testapp", "auto_1")]
)
# Now, test the pk table and field name.
fk_field = changes["otherapp"][0].operations[0].field
self.assertEqual(
to_state.get_concrete_model_key(fk_field.remote_field.model),
("testapp", "authorproxy"),
)
self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy")
def test_unmanaged_create(self):
"""The autodetector correctly deals with managed models."""
# First, we test adding an unmanaged model
changes = self.get_changes(
[self.author_empty], [self.author_empty, self.author_unmanaged]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="AuthorUnmanaged", options={"managed": False}
)
def test_unmanaged_delete(self):
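        """Deleting an unmanaged model generates a DeleteModel."""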
changes = self.get_changes(
[self.author_empty, self.author_unmanaged], [self.author_empty]
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"])
def test_unmanaged_to_managed(self):
# Now, we test turning an unmanaged model into a managed model
changes = self.get_changes(
[self.author_empty, self.author_unmanaged],
[self.author_empty, self.author_unmanaged_managed],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="authorunmanaged", options={}
)
def test_managed_to_unmanaged(self):
# Now, we turn managed to unmanaged.
changes = self.get_changes(
[self.author_empty, self.author_unmanaged_managed],
[self.author_empty, self.author_unmanaged],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="authorunmanaged", options={"managed": False}
)
def test_unmanaged_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on
unmanaged models.
"""
# First, we test the default pk field name
changes = self.get_changes([], [self.author_unmanaged_default_pk, self.book])
# The model the FK on the book model points to.
fk_field = changes["otherapp"][0].operations[0].fields[2][1]
self.assertEqual(fk_field.remote_field.model, "testapp.Author")
# Now, we test the custom pk field name
changes = self.get_changes([], [self.author_unmanaged_custom_pk, self.book])
# The model the FK on the book model points to.
fk_field = changes["otherapp"][0].operations[0].fields[2][1]
self.assertEqual(fk_field.remote_field.model, "testapp.Author")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
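        """
        A new model with an FK to the swappable user model depends on the
        AUTH_USER_MODEL setting.
        """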
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes(
[self.custom_user], [self.custom_user, self.author_with_custom_user]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertMigrationDependencies(
changes, "testapp", 0, [("__setting__", "AUTH_USER_MODEL")]
)
def test_swappable_lowercase(self):
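        """
        A lowercased reference to the swappable user model still creates a
        dependency on the AUTH_USER_MODEL setting.
        """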
model_state = ModelState(
"testapp",
"Document",
[
("id", models.AutoField(primary_key=True)),
(
"owner",
models.ForeignKey(
settings.AUTH_USER_MODEL.lower(),
models.CASCADE,
),
),
],
)
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes([], [model_state])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Document")
self.assertMigrationDependencies(
changes,
"testapp",
0,
[("__setting__", "AUTH_USER_MODEL")],
)
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_many_to_many_model_case(self):
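        """
        Changing only the case of a swappable ManyToManyField target doesn't
        generate a migration.
        """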
document_lowercase = ModelState(
"testapp",
"Document",
[
("id", models.AutoField(primary_key=True)),
("owners", models.ManyToManyField(settings.AUTH_USER_MODEL.lower())),
],
)
document = ModelState(
"testapp",
"Document",
[
("id", models.AutoField(primary_key=True)),
("owners", models.ManyToManyField(settings.AUTH_USER_MODEL)),
],
)
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes(
[self.custom_user, document_lowercase],
[self.custom_user, document],
)
self.assertEqual(len(changes), 0)
def test_swappable_changed(self):
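        """
        Changing AUTH_USER_MODEL results in an AlterField on FKs that point
        to the swapped model.
        """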
with isolate_lru_cache(apps.get_swappable_settings_name):
before = self.make_project_state([self.custom_user, self.author_with_user])
with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
after = self.make_project_state(
[self.custom_user, self.author_with_custom_user]
)
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, model_name="author", name="user"
)
fk_field = changes["testapp"][0].operations[0].field
self.assertEqual(fk_field.remote_field.model, "thirdapp.CustomUser")
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
changes = self.get_changes([self.author_empty], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_custom_deconstructible(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
changes = self.get_changes(
[self.author_name_deconstructible_1], [self.author_name_deconstructible_2]
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
changes = self.get_changes(
[self.author_name_deconstructible_3], [self.author_name_deconstructible_4]
)
self.assertEqual(changes, {})
def test_deconstructible_list(self):
"""Nested deconstruction descends into lists."""
# When lists contain items that deconstruct to identical values, those lists
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_list_1],
[self.author_name_deconstructible_list_2],
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed lists should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_list_1],
[self.author_name_deconstructible_list_3],
)
self.assertEqual(len(changes), 1)
def test_deconstructible_tuple(self):
"""Nested deconstruction descends into tuples."""
# When tuples contain items that deconstruct to identical values, those tuples
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1],
[self.author_name_deconstructible_tuple_2],
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed tuples should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1],
[self.author_name_deconstructible_tuple_3],
)
self.assertEqual(len(changes), 1)
def test_deconstructible_dict(self):
"""Nested deconstruction descends into dict values."""
# When dicts contain items whose values deconstruct to identical values,
# those dicts should be considered equal for the purpose of detecting
# state changes (even if the original values are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_dict_1],
[self.author_name_deconstructible_dict_2],
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed dicts should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_dict_1],
[self.author_name_deconstructible_dict_3],
)
self.assertEqual(len(changes), 1)
def test_nested_deconstructible_objects(self):
"""
Nested deconstruction is applied recursively to the args/kwargs of
deconstructed objects.
"""
# If the items within a deconstructed object's args/kwargs have the same
# deconstructed values - whether or not the items themselves are different
# instances - then the object as a whole is regarded as unchanged.
changes = self.get_changes(
[self.author_name_nested_deconstructible_1],
[self.author_name_nested_deconstructible_2],
)
self.assertEqual(changes, {})
# Differences that exist solely within the args list of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1],
[self.author_name_nested_deconstructible_changed_arg],
)
self.assertEqual(len(changes), 1)
# Additional args should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1],
[self.author_name_nested_deconstructible_extra_arg],
)
self.assertEqual(len(changes), 1)
# Differences that exist solely within the kwargs dict of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1],
[self.author_name_nested_deconstructible_changed_kwarg],
)
self.assertEqual(len(changes), 1)
# Additional kwargs should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1],
[self.author_name_nested_deconstructible_extra_kwarg],
)
self.assertEqual(len(changes), 1)
def test_deconstruct_type(self):
"""
        #22951 -- Uninstantiated classes with a deconstruct() method are
        correctly returned by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
),
),
],
)
changes = self.get_changes([], [author])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
#22300 - Adding an FK in the same "spot" as a deleted CharField should
work.
"""
changes = self.get_changes(
[self.author_with_publisher_string],
[self.author_with_publisher, self.publisher],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
changes = self.get_changes(
[self.author_with_publisher, self.publisher], [self.author_name]
) # removes both the model and FK
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_many_to_many(self, mocked_ask_method):
"""#22435 - Adding a ManyToManyField should not prompt for a default."""
changes = self.get_changes(
[self.author_empty, self.publisher], [self.author_with_m2m, self.publisher]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
def test_alter_many_to_many(self):
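        """Altering a ManyToManyField's options generates an AlterField."""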
changes = self.get_changes(
[self.author_with_m2m, self.publisher],
[self.author_with_m2m_blank, self.publisher],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
def test_create_with_through_model(self):
"""
        Adding an M2M with a through model and the models that use it should
        be ordered correctly.
"""
changes = self.get_changes(
[], [self.author_with_m2m_through, self.publisher, self.contract]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
[
"CreateModel",
"CreateModel",
"CreateModel",
"AddField",
],
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Contract")
self.assertOperationAttributes(
changes, "testapp", 0, 3, model_name="author", name="publishers"
)
def test_create_with_through_model_separate_apps(self):
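        """
        A through model in a separate app splits the M2M field addition into
        a second migration that depends on all three apps' migrations.
        """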
author_with_m2m_through = ModelState(
"authors",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"publishers",
models.ManyToManyField(
"testapp.Publisher", through="contract.Contract"
),
),
],
)
contract = ModelState(
"contract",
"Contract",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("authors.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
changes = self.get_changes(
[], [author_with_m2m_through, self.publisher, contract]
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertNumberMigrations(changes, "contract", 1)
self.assertNumberMigrations(changes, "authors", 2)
self.assertMigrationDependencies(
changes,
"authors",
1,
{("authors", "auto_1"), ("contract", "auto_1"), ("testapp", "auto_1")},
)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationTypes(changes, "contract", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "contract", 0, 0, name="Contract")
self.assertOperationTypes(changes, "authors", 0, ["CreateModel"])
self.assertOperationTypes(changes, "authors", 1, ["AddField"])
self.assertOperationAttributes(changes, "authors", 0, 0, name="Author")
self.assertOperationAttributes(
changes, "authors", 1, 0, model_name="author", name="publishers"
)
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change
must remove the field before the model to maintain consistency.
"""
changes = self.get_changes(
[
self.book_with_multiple_authors_through_attribution,
self.author_name,
self.attribution,
],
[self.book_with_no_author, self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes, "otherapp", 0, ["RemoveField", "DeleteModel"]
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="authors", model_name="book"
)
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution")
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the "through" model
in the same change must remove the field before the model to maintain
consistency.
"""
changes = self.get_changes(
[
self.book_with_multiple_authors_through_attribution,
self.author_name,
self.attribution,
],
[self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes, "otherapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"]
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="authors", model_name="book"
)
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution")
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="Book")
def test_m2m_w_through_multistep_remove(self):
"""
        A model with an M2M field that specifies a "through" model cannot be
        removed before that through model's FK fields, as the schema would
        pass through an inconsistent state. The autodetector removes the
        through model's fields before deleting either model.
"""
changes = self.get_changes(
[self.author_with_m2m_through, self.publisher, self.contract],
[self.publisher],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"],
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", model_name="contract"
)
self.assertOperationAttributes(
changes, "testapp", 0, 1, name="publisher", model_name="contract"
)
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Contract")
def test_concrete_field_changed_to_many_to_many(self):
"""
#23938 - Changing a concrete field into a ManyToManyField
first removes the concrete field and then adds the m2m field.
"""
changes = self.get_changes(
[self.author_with_former_m2m], [self.author_with_m2m, self.publisher]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationAttributes(
changes, "testapp", 0, 1, name="publishers", model_name="author"
)
self.assertOperationAttributes(
changes, "testapp", 0, 2, name="publishers", model_name="author"
)
def test_many_to_many_changed_to_concrete_field(self):
"""
#23938 - Changing a ManyToManyField into a concrete field
first removes the m2m field and then adds the concrete field.
"""
changes = self.get_changes(
[self.author_with_m2m, self.publisher], [self.author_with_former_m2m]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["RemoveField", "DeleteModel", "AddField"]
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="publishers", model_name="author"
)
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(
changes, "testapp", 0, 2, name="publishers", model_name="author"
)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, max_length=100)
def test_non_circular_foreignkey_dependency_removal(self):
"""
If two models with a ForeignKey from one to the other are removed at the
same time, the autodetector should remove them in the correct order.
"""
changes = self.get_changes(
[self.author_with_publisher, self.publisher_with_author], []
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"]
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", model_name="publisher"
)
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Publisher")
def test_alter_model_options(self):
"""Changing a model's options should make a change."""
changes = self.get_changes([self.author_empty], [self.author_with_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
options={
"permissions": [("can_hire", "Can hire")],
"verbose_name": "Authi",
},
)
# Changing them back to empty should also make a change
changes = self.get_changes([self.author_with_options], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", options={}
)
def test_alter_model_options_proxy(self):
"""Changing a proxy model's options should also make a change."""
changes = self.get_changes(
[self.author_proxy, self.author_empty],
[self.author_proxy_options, self.author_empty],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="authorproxy",
options={"verbose_name": "Super Author"},
)
def test_set_alter_order_with_respect_to(self):
"""Setting order_with_respect_to adds a field."""
changes = self.get_changes(
[self.book, self.author_with_book],
[self.book, self.author_with_book_order_wrt],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", order_with_respect_to="book"
)
def test_add_alter_order_with_respect_to(self):
"""
        Setting order_with_respect_to while also adding the FK it refers to
        adds the field before AlterOrderWithRespectTo.
"""
changes = self.get_changes(
[self.author_name], [self.book, self.author_with_book_order_wrt]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AddField", "AlterOrderWithRespectTo"]
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, model_name="author", name="book"
)
self.assertOperationAttributes(
changes, "testapp", 0, 1, name="author", order_with_respect_to="book"
)
def test_remove_alter_order_with_respect_to(self):
"""
        Removing order_with_respect_to while also removing the FK it refers
        to clears the option before the field is removed.
"""
changes = self.get_changes(
[self.book, self.author_with_book_order_wrt], [self.author_name]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AlterOrderWithRespectTo", "RemoveField"]
)
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", order_with_respect_to=None
)
self.assertOperationAttributes(
changes, "testapp", 0, 1, model_name="author", name="book"
)
def test_add_model_order_with_respect_to(self):
"""
        Setting order_with_respect_to when adding the whole model keeps the
        option in CreateModel and doesn't add an explicit _order field.
"""
changes = self.get_changes([], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="Author",
options={"order_with_respect_to": "book"},
)
self.assertNotIn(
"_order",
[name for name, field in changes["testapp"][0].operations[0].fields],
)
def test_add_model_order_with_respect_to_unique_together(self):
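        """
        order_with_respect_to and a unique_together referencing _order are
        both kept in the CreateModel operation's options.
        """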
changes = self.get_changes(
[],
[
self.book,
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={
"order_with_respect_to": "book",
"unique_together": {("id", "_order")},
},
),
],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="Author",
options={
"order_with_respect_to": "book",
"unique_together": {("id", "_order")},
},
)
def test_add_model_order_with_respect_to_index_constraint(self):
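        """
        Indexes and constraints referencing the implicit _order field are
        split out of CreateModel into their own operations.
        """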
tests = [
(
"AddIndex",
{
"indexes": [
models.Index(fields=["_order"], name="book_order_idx"),
]
},
),
(
"AddConstraint",
{
"constraints": [
models.CheckConstraint(
check=models.Q(_order__gt=1),
name="book_order_gt_1",
),
]
},
),
]
for operation, extra_option in tests:
with self.subTest(operation=operation):
after = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={
"order_with_respect_to": "book",
**extra_option,
},
)
changes = self.get_changes([], [self.book, after])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
[
"CreateModel",
operation,
],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="Author",
options={"order_with_respect_to": "book"},
)
def test_set_alter_order_with_respect_to_index_constraint_unique_together(self):
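        """
        Options that reference _order are applied after the
        AlterOrderWithRespectTo operation that creates the field.
        """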
tests = [
(
"AddIndex",
{
"indexes": [
models.Index(fields=["_order"], name="book_order_idx"),
]
},
),
(
"AddConstraint",
{
"constraints": [
models.CheckConstraint(
check=models.Q(_order__gt=1),
name="book_order_gt_1",
),
]
},
),
("AlterUniqueTogether", {"unique_together": {("id", "_order")}}),
]
for operation, extra_option in tests:
with self.subTest(operation=operation):
after = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={
"order_with_respect_to": "book",
**extra_option,
},
)
changes = self.get_changes(
[self.book, self.author_with_book],
[self.book, after],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
[
"AlterOrderWithRespectTo",
operation,
],
)
def test_alter_model_managers(self):
"""
Changing the model managers adds a new operation.
"""
changes = self.get_changes([self.other_pony], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterModelManagers"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="pony")
self.assertEqual(
[name for name, mgr in changes["otherapp"][0].operations[0].managers],
["food_qs", "food_mgr", "food_mgr_kwargs"],
)
self.assertEqual(
changes["otherapp"][0].operations[0].managers[1][1].args, ("a", "b", 1, 2)
)
self.assertEqual(
changes["otherapp"][0].operations[0].managers[2][1].args, ("x", "y", 3, 4)
)
def test_swappable_first_inheritance(self):
"""Swappable models get their CreateModel first."""
changes = self.get_changes([], [self.custom_user, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(
changes, "thirdapp", 0, ["CreateModel", "CreateModel"]
)
self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark")
def test_default_related_name_option(self):
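        """
        default_related_name is tracked as a model option; removing it
        generates an AlterModelOptions operation.
        """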
model_state = ModelState(
"app",
"model",
[
("id", models.AutoField(primary_key=True)),
],
options={"default_related_name": "related_name"},
)
changes = self.get_changes([], [model_state])
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
name="model",
options={"default_related_name": "related_name"},
)
altered_model_state = ModelState(
"app",
"Model",
[
("id", models.AutoField(primary_key=True)),
],
)
changes = self.get_changes([model_state], [altered_model_state])
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "app", 0, 0, name="model", options={})
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""Swappable models get their CreateModel first."""
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes([], [self.custom_user_no_inherit, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(
changes, "thirdapp", 0, ["CreateModel", "CreateModel"]
)
self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark")
def test_bases_first(self):
"""Bases of other models come first."""
changes = self.get_changes(
[], [self.aardvark_based_on_author, self.author_name]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark")
def test_bases_first_mixed_case_app_label(self):
app_label = "MiXedCaseApp"
changes = self.get_changes(
[],
[
ModelState(
app_label,
"owner",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
app_label,
"place",
[
("id", models.AutoField(primary_key=True)),
(
"owner",
models.ForeignKey("MiXedCaseApp.owner", models.CASCADE),
),
],
),
ModelState(app_label, "restaurant", [], bases=("MiXedCaseApp.place",)),
],
)
self.assertNumberMigrations(changes, app_label, 1)
self.assertOperationTypes(
changes,
app_label,
0,
[
"CreateModel",
"CreateModel",
"CreateModel",
],
)
self.assertOperationAttributes(changes, app_label, 0, 0, name="owner")
self.assertOperationAttributes(changes, app_label, 0, 1, name="place")
self.assertOperationAttributes(changes, app_label, 0, 2, name="restaurant")
def test_multiple_bases(self):
"""
Inheriting models doesn't move *_ptr fields into AddField operations.
"""
A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
C = ModelState("app", "C", [], bases=("app.A", "app.B"))
D = ModelState("app", "D", [], bases=("app.A", "app.B"))
E = ModelState("app", "E", [], bases=("app.A", "app.B"))
changes = self.get_changes([], [A, B, C, D, E])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(
changes,
"app",
0,
["CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"],
)
self.assertOperationAttributes(changes, "app", 0, 0, name="A")
self.assertOperationAttributes(changes, "app", 0, 1, name="B")
self.assertOperationAttributes(changes, "app", 0, 2, name="C")
self.assertOperationAttributes(changes, "app", 0, 3, name="D")
self.assertOperationAttributes(changes, "app", 0, 4, name="E")
def test_proxy_bases_first(self):
"""Bases of proxies come first."""
changes = self.get_changes(
[], [self.author_empty, self.author_proxy, self.author_proxy_proxy]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy")
self.assertOperationAttributes(
changes, "testapp", 0, 2, name="AAuthorProxyProxy"
)
def test_pk_fk_included(self):
"""
A relation used as the primary key is kept as part of CreateModel.
"""
changes = self.get_changes([], [self.aardvark_pk_fk_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
A dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = {"migrations"}
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("migrations", "__first__")]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
A dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = {"migrations"}
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("migrations", "0002_second")]
)
def test_alter_fk_before_model_deletion(self):
"""
        ForeignKeys are altered _before_ the model they used to
        refer to is deleted.
"""
changes = self.get_changes(
[self.author_name, self.publisher_with_author],
[self.aardvark_testapp, self.publisher_with_aardvark_author],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "AlterField", "DeleteModel"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
#23100 - ForeignKeys correctly depend on other apps' models.
"""
changes = self.get_changes(
[self.author_name, self.book], [self.author_with_book, self.book]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="book")
self.assertMigrationDependencies(
changes, "testapp", 0, [("otherapp", "__first__")]
)
def test_alter_unique_together_fk_to_m2m(self):
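        """
        Changing a unique_together field from an FK to an M2M clears
        unique_together before the field is replaced.
        """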
changes = self.get_changes(
[self.author_name, self.book_unique_together],
[
self.author_name,
ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
],
),
],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField", "AddField"]
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", unique_together=set()
)
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="book", name="author"
)
self.assertOperationAttributes(
changes, "otherapp", 0, 2, model_name="book", name="author"
)
def test_alter_field_to_fk_dependency_other_app(self):
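        """
        Altering a plain field into an FK adds a dependency on the target
        model's app.
        """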
changes = self.get_changes(
[self.author_empty, self.book_with_no_author_fk],
[self.author_empty, self.book],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
self.assertMigrationDependencies(
changes, "otherapp", 0, [("testapp", "__first__")]
)
def test_circular_dependency_mixed_addcreate(self):
"""
        #23315 - The dependency resolver knows to put all CreateModel
        operations before AddField operations so the graph doesn't become
        unsolvable.
"""
address = ModelState(
"a",
"Address",
[
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)),
],
)
person = ModelState(
"a",
"Person",
[
("id", models.AutoField(primary_key=True)),
],
)
apackage = ModelState(
"b",
"APackage",
[
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person", models.CASCADE)),
],
)
country = ModelState(
"b",
"DeliveryCountry",
[
("id", models.AutoField(primary_key=True)),
],
)
changes = self.get_changes([], [address, person, apackage, country])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "a", 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertOperationTypes(changes, "b", 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
tenant = ModelState(
"a",
"Tenant",
[
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address", models.CASCADE)),
],
bases=(AbstractBaseUser,),
)
address = ModelState(
"b",
"Address",
[
("id", models.AutoField(primary_key=True)),
(
"tenant",
models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE),
),
],
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertMigrationDependencies(changes, "a", 0, [])
self.assertMigrationDependencies(
changes, "a", 1, [("a", "auto_1"), ("b", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "b", 0, ["CreateModel"])
self.assertMigrationDependencies(
changes, "b", 0, [("__setting__", "AUTH_USER_MODEL")]
)
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
address = ModelState(
"a",
"Address",
[
("id", models.AutoField(primary_key=True)),
(
"tenant",
models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE),
),
],
)
tenant = ModelState(
"b",
"Tenant",
[
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address", models.CASCADE)),
],
bases=(AbstractBaseUser,),
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertMigrationDependencies(changes, "a", 0, [])
self.assertMigrationDependencies(
changes, "a", 1, [("__setting__", "AUTH_USER_MODEL"), ("a", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "b", 0, ["CreateModel"])
self.assertMigrationDependencies(changes, "b", 0, [("a", "auto_1")])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
        #23322 - The dependency resolver knows to explicitly resolve
        swappable models, even when they reference themselves.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
person = ModelState(
"a",
"Person",
[
("id", models.AutoField(primary_key=True)),
(
"parent1",
models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
related_name="children",
),
),
],
)
changes = self.get_changes([], [person])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 1)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertMigrationDependencies(changes, "a", 0, [])
@override_settings(AUTH_USER_MODEL="a.User")
def test_swappable_circular_multi_mti(self):
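        """
        A swappable user model that inherits, via MTI, from a model with an
        FK back to it is handled in one migration with the FK added last.
        """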
with isolate_lru_cache(apps.get_swappable_settings_name):
parent = ModelState(
"a",
"Parent",
[("user", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE))],
)
child = ModelState("a", "Child", [], bases=("a.Parent",))
user = ModelState("a", "User", [], bases=(AbstractBaseUser, "a.Child"))
changes = self.get_changes([], [parent, child, user])
self.assertNumberMigrations(changes, "a", 1)
self.assertOperationTypes(
changes, "a", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField"]
)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and blank `CharField` or `TextField`
without default should not prompt for a default.
"""
changes = self.get_changes(
[self.author_empty], [self.author_with_biography_blank]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition"
)
def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
without default should prompt for a default.
"""
changes = self.get_changes(
[self.author_empty], [self.author_with_biography_non_blank]
)
self.assertEqual(mocked_ask_method.call_count, 2)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0)
def test_mti_inheritance_model_removal(self):
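        """Removing an MTI child model only deletes the child."""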
Animal = ModelState(
"app",
"Animal",
[
("id", models.AutoField(primary_key=True)),
],
)
Dog = ModelState("app", "Dog", [], bases=("app.Animal",))
changes = self.get_changes([Animal, Dog], [Animal])
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "app", 0, 0, name="Dog")
def test_add_model_with_field_removed_from_base_model(self):
"""
Removing a base field takes place before adding a new inherited model
that has a field with the same name.
"""
before = [
ModelState(
"app",
"readable",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
],
),
]
after = [
ModelState(
"app",
"readable",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"app",
"book",
[
("title", models.CharField(max_length=200)),
],
bases=("app.readable",),
),
]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RemoveField", "CreateModel"])
self.assertOperationAttributes(
changes, "app", 0, 0, name="title", model_name="readable"
)
self.assertOperationAttributes(changes, "app", 0, 1, name="book")
def test_parse_number(self):
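        """
        parse_number() extracts the migration number, preferring the number
        after the last "squashed" for squashed migrations.
        """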
tests = [
("no_number", None),
("0001_initial", 1),
("0002_model3", 2),
("0002_auto_20380101_1112", 2),
("0002_squashed_0003", 3),
("0002_model2_squashed_0003_other4", 3),
("0002_squashed_0003_squashed_0004", 4),
("0002_model2_squashed_0003_other4_squashed_0005_other6", 5),
("0002_custom_name_20380101_1112_squashed_0003_model", 3),
("2_squashed_4", 4),
]
for migration_name, expected_number in tests:
with self.subTest(migration_name=migration_name):
self.assertEqual(
MigrationAutodetector.parse_number(migration_name),
expected_number,
)
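    # A custom FK that hardcodes its target and strips "to" from its
    # deconstructed kwargs; the autodetector must handle field states that
    # carry no explicit "to" argument.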
def test_add_custom_fk_with_hardcoded_to(self):
class HardcodedForeignKey(models.ForeignKey):
def __init__(self, *args, **kwargs):
kwargs["to"] = "testapp.Author"
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["to"]
return name, path, args, kwargs
book_hardcoded_fk_to = ModelState(
"testapp",
"Book",
[
("author", HardcodedForeignKey(on_delete=models.CASCADE)),
],
)
changes = self.get_changes(
[self.author_empty],
[self.author_empty, book_hardcoded_fk_to],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Book")
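
# Meta.index_together is deprecated (scheduled for removal in Django 5.1,
# hence the ignore_warnings decorator); these tests exercise the legacy
# autodetector behavior while it is still supported.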
@ignore_warnings(category=RemovedInDjango51Warning)
class AutodetectorIndexTogetherTests(BaseAutodetectorTests):
book_index_together = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"index_together": {("author", "title")},
},
)
book_index_together_2 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"index_together": {("title", "author")},
},
)
book_index_together_3 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"index_together": {("title", "newfield")},
},
)
book_index_together_4 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"index_together": {("title", "newfield2")},
},
)
def test_empty_index_together(self):
"""Empty index_together shouldn't generate a migration."""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
model_state_not_specified = ModelState(
"a", "model", [("id", models.AutoField(primary_key=True))]
)
# Explicitly testing for None, since this was the issue in #23452 after
# an AlterIndexTogether operation with e.g. () as value
model_state_none = ModelState(
"a",
"model",
[("id", models.AutoField(primary_key=True))],
{
"index_together": None,
},
)
# Explicitly testing for the empty set, since we now always have sets.
        # During removal, (('col1', 'col2'),) --> () becomes set([]).
model_state_empty = ModelState(
"a",
"model",
[("id", models.AutoField(primary_key=True))],
{
"index_together": set(),
},
)
def test(from_state, to_state, msg):
changes = self.get_changes([from_state], [to_state])
if changes:
ops = ", ".join(
o.__class__.__name__ for o in changes["a"][0].operations
)
self.fail("Created operation(s) %s from %s" % (ops, msg))
tests = (
(
model_state_not_specified,
model_state_not_specified,
'"not specified" to "not specified"',
),
(model_state_not_specified, model_state_none, '"not specified" to "None"'),
(
model_state_not_specified,
model_state_empty,
'"not specified" to "empty"',
),
(model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(
model_state_empty,
model_state_not_specified,
'"empty" to "not specified"',
),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_rename_index_together_to_index(self):
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, AutodetectorTests.book_indexes],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameIndex"])
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
model_name="book",
new_name="book_title_author_idx",
old_fields=("author", "title"),
)
def test_rename_index_together_to_index_extra_options(self):
# Indexes with extra options don't match indexes in index_together.
book_partial_index = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(
fields=["author", "title"],
condition=models.Q(title__startswith="The"),
name="book_title_author_idx",
)
],
},
)
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, book_partial_index],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterIndexTogether", "AddIndex"],
)
def test_rename_index_together_to_index_order_fields(self):
# Indexes with reordered fields don't match indexes in index_together.
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, AutodetectorTests.book_unordered_indexes],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterIndexTogether", "AddIndex"],
)
def test_add_index_together(self):
changes = self.get_changes(
[AutodetectorTests.author_empty, AutodetectorTests.book],
[AutodetectorTests.author_empty, self.book_index_together],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterIndexTogether"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", index_together={("author", "title")}
)
def test_remove_index_together(self):
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, AutodetectorTests.book],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterIndexTogether"])
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", index_together=set()
)
def test_index_together_remove_fk(self):
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, AutodetectorTests.book_with_no_author],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterIndexTogether", "RemoveField"],
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", index_together=set()
)
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="book", name="author"
)
def test_index_together_no_changes(self):
"""
index_together doesn't generate a migration if no changes have been
made.
"""
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, self.book_index_together],
)
self.assertEqual(len(changes), 0)
def test_index_together_ordering(self):
"""index_together triggers on ordering changes."""
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together],
[AutodetectorTests.author_empty, self.book_index_together_2],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterIndexTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
name="book",
index_together={("title", "author")},
)
def test_add_field_and_index_together(self):
"""
Added fields will be created before using them in index_together.
"""
changes = self.get_changes(
[AutodetectorTests.author_empty, AutodetectorTests.book],
[AutodetectorTests.author_empty, self.book_index_together_3],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AddField", "AlterIndexTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
name="book",
index_together={("title", "newfield")},
)
def test_create_model_and_index_together(self):
author = ModelState(
"otherapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
)
book_with_author = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"index_together": {("title", "author")},
},
)
changes = self.get_changes(
[AutodetectorTests.book_with_no_author], [author, book_with_author]
)
self.assertEqual(len(changes["otherapp"]), 1)
migration = changes["otherapp"][0]
self.assertEqual(len(migration.operations), 3)
self.assertOperationTypes(
changes,
"otherapp",
0,
["CreateModel", "AddField", "AlterIndexTogether"],
)
def test_remove_field_and_index_together(self):
"""
Removed fields will be removed after updating index_together.
"""
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together_3],
[AutodetectorTests.author_empty, self.book_index_together],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["AlterIndexTogether", "RemoveField"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
0,
name="book",
index_together={("author", "title")},
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
model_name="book",
name="newfield",
)
def test_alter_field_and_index_together(self):
"""Fields are altered after deleting some index_together."""
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField(db_index=True)),
],
{
"index_together": {("name",)},
},
)
author_reversed_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, unique=True)),
("age", models.IntegerField()),
],
{
"index_together": {("age",)},
},
)
changes = self.get_changes([initial_author], [author_reversed_constraints])
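        # The old index_together is dropped first so the fields can be
        # altered, then the new composition is added afterwards.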
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
[
"AlterIndexTogether",
"AlterField",
"AlterField",
"AlterIndexTogether",
],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
index_together=set(),
)
self.assertOperationAttributes(
changes,
"testapp",
0,
1,
model_name="author",
name="age",
)
self.assertOperationAttributes(
changes,
"testapp",
0,
2,
model_name="author",
name="name",
)
self.assertOperationAttributes(
changes,
"testapp",
0,
3,
name="author",
index_together={("age",)},
)
def test_partly_alter_index_together_increase(self):
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"index_together": {("name",)},
},
)
author_new_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"index_together": {("name",), ("age",)},
},
)
changes = self.get_changes([initial_author], [author_new_constraints])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["AlterIndexTogether"],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
index_together={("name",), ("age",)},
)
def test_partly_alter_index_together_decrease(self):
initial_author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"index_together": {("name",), ("age",)},
},
)
author_new_constraints = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("age", models.IntegerField()),
],
{
"index_together": {("age",)},
},
)
changes = self.get_changes([initial_author], [author_new_constraints])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["AlterIndexTogether"],
)
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="author",
index_together={("age",)},
)
def test_rename_field_and_index_together(self):
"""Fields are renamed before updating index_together."""
changes = self.get_changes(
[AutodetectorTests.author_empty, self.book_index_together_3],
[AutodetectorTests.author_empty, self.book_index_together_4],
MigrationQuestioner({"ask_rename": True}),
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes,
"otherapp",
0,
["RenameField", "AlterIndexTogether"],
)
self.assertOperationAttributes(
changes,
"otherapp",
0,
1,
name="book",
index_together={("title", "newfield2")},
)
def test_add_model_order_with_respect_to_index_together(self):
changes = self.get_changes(
[],
[
AutodetectorTests.book,
ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={
"order_with_respect_to": "book",
"index_together": {("name", "_order")},
},
),
],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes,
"testapp",
0,
0,
name="Author",
options={
"order_with_respect_to": "book",
"index_together": {("name", "_order")},
},
)
def test_set_alter_order_with_respect_to_index_together(self):
after = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={
"order_with_respect_to": "book",
"index_together": {("name", "_order")},
},
)
changes = self.get_changes(
[AutodetectorTests.book, AutodetectorTests.author_with_book],
[AutodetectorTests.book, after],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes,
"testapp",
0,
["AlterOrderWithRespectTo", "AlterIndexTogether"],
)
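
# suggest_name() builds a migration name from its operations: "initial" for
# initial migrations, operation-derived names otherwise, and an "auto_"
# prefixed fallback when nothing usable can be derived.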
class MigrationSuggestNameTests(SimpleTestCase):
def test_no_operations(self):
class Migration(migrations.Migration):
operations = []
migration = Migration("some_migration", "test_app")
self.assertIs(migration.suggest_name().startswith("auto_"), True)
def test_no_operations_initial(self):
class Migration(migrations.Migration):
initial = True
operations = []
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_single_operation(self):
class Migration(migrations.Migration):
operations = [migrations.CreateModel("Person", fields=[])]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "person")
class Migration(migrations.Migration):
operations = [migrations.DeleteModel("Person")]
migration = Migration("0002_initial", "test_app")
self.assertEqual(migration.suggest_name(), "delete_person")
def test_single_operation_long_name(self):
class Migration(migrations.Migration):
operations = [migrations.CreateModel("A" * 53, fields=[])]
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "a" * 53)
def test_two_operations(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.DeleteModel("Animal"),
]
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "person_delete_animal")
def test_two_create_models(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.CreateModel("Animal", fields=[]),
]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "person_animal")
def test_two_create_models_with_initial_true(self):
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.CreateModel("Animal", fields=[]),
]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_many_operations_suffix(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person1", fields=[]),
migrations.CreateModel("Person2", fields=[]),
migrations.CreateModel("Person3", fields=[]),
migrations.DeleteModel("Person4"),
migrations.DeleteModel("Person5"),
]
migration = Migration("some_migration", "test_app")
self.assertEqual(
migration.suggest_name(),
"person1_person2_person3_delete_person4_and_more",
)
def test_operation_with_no_suggested_name(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.RunSQL("SELECT 1 FROM person;"),
]
migration = Migration("some_migration", "test_app")
self.assertIs(migration.suggest_name().startswith("auto_"), True)
def test_none_name(self):
class Migration(migrations.Migration):
operations = [migrations.RunSQL("SELECT 1 FROM person;")]
migration = Migration("0001_initial", "test_app")
suggest_name = migration.suggest_name()
self.assertIs(suggest_name.startswith("auto_"), True)
def test_none_name_with_initial_true(self):
class Migration(migrations.Migration):
initial = True
operations = [migrations.RunSQL("SELECT 1 FROM person;")]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_auto(self):
migration = migrations.Migration("0001_initial", "test_app")
suggest_name = migration.suggest_name()
self.assertIs(suggest_name.startswith("auto_"), True)

import datetime
import math
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg,
Case,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
F,
FloatField,
IntegerField,
Max,
Min,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
Value,
Variance,
When,
)
from django.db.models.expressions import Func, RawSQL
from django.db.models.functions import (
Cast,
Coalesce,
Greatest,
Now,
Pi,
TruncDate,
TruncHour,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
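
# A Now() variant rendered with a plain CURRENT_TIMESTAMP template so test
# comparisons are made in UTC; backends can substitute their own template via
# the test_now_utc_template feature flag.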
class NowUTC(Now):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
def as_sql(self, compiler, connection, **extra_context):
if connection.features.test_now_utc_template:
extra_context["template"] = connection.features.test_now_utc_template
return super().as_sql(compiler, connection, **extra_context)
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
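        # Shared fixture: nine authors linked by a friendship graph, five
        # publishers, six books, and three stores stocking those books.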
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.p2 = Publisher.objects.create(
name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
"Using an aggregate in order_by() without also including it in "
"annotate() is not allowed: Avg(F(book__rating)"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values("age").order_by(Avg("book__rating"))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(
vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
)
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {"age__sum": 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {"book__rating__avg": 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {"publisher__num_awards__sum": 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {"books__authors__age__max": 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(
amazon_mean=Avg("books__rating")
)
self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
def test_aggregate_transform(self):
vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
self.assertEqual(vals, {"min_month": 3})
def test_aggregate_join_transform(self):
vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
],
lambda b: b.name,
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name, "The Definitive Guide to Django: Web Development Done Right"
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = (
Book.objects.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = (
Book.objects.select_related("contact")
.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
)
def test_annotate_m2m(self):
books = (
Book.objects.filter(rating__lt=4.5)
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
("Practical Django Projects", 29.0),
("Python Web Development with Django", Approximate(30.3, places=1)),
("Sams Teach Yourself Django in 24 Hours", 45.0),
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
lambda b: (b.name, b.num_authors),
)
def test_backwards_m2m_annotate(self):
authors = (
Author.objects.filter(name__contains="a")
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 4.5),
("Brad Dayley", 3.0),
("Jacob Kaplan-Moss", 4.5),
("James Bennett", 4.0),
("Paul Bissex", 4.0),
("Stuart Russell", 4.0),
],
lambda a: (a.name, a.book__rating__avg),
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Peter Norvig", 2),
("Stuart Russell", 1),
("Wesley J. Chun", 1),
],
lambda a: (a.name, a.num_books),
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
9,
),
("Practical Django Projects", 3),
("Python Web Development with Django", 7),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 3),
],
lambda b: (b.name, b.publisher__num_awards__sum),
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers,
[
("Apress", Decimal("59.69")),
("Jonno's House of Books", None),
("Morgan Kaufmann", Decimal("75.00")),
("Prentice Hall", Decimal("112.49")),
("Sams", Decimal("23.09")),
],
lambda p: (p.name, p.book__price__sum),
)
def test_annotate_values(self):
books = list(
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values()
)
self.assertEqual(
books,
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books),
[
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("name")
)
self.assertEqual(
list(books),
[{"name": "The Definitive Guide to Django: Web Development Done Right"}],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.values()
.annotate(mean_age=Avg("authors__age"))
)
self.assertEqual(
list(books),
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books),
[
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1),
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
},
],
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 32.0),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 29.5),
("James Bennett", 34.0),
("Jeffrey Forcier", 27.0),
("Paul Bissex", 31.0),
("Peter Norvig", 46.0),
("Stuart Russell", 57.0),
("Wesley J. Chun", Approximate(33.66, places=1)),
],
lambda a: (a.name, a.friends__age__avg),
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating", "xprice")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(
list(qs),
[
{"rating": 4.0, "count": 1},
{"rating": 4.0, "count": 2},
],
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
def test_annotate_ordering(self):
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "rating")
)
self.assertEqual(
list(books),
[
{"rating": 4.5, "oldest": 35},
{"rating": 3.0, "oldest": 45},
{"rating": 4.0, "oldest": 57},
{"rating": 5.0, "oldest": 57},
],
)
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("-oldest", "-rating")
)
self.assertEqual(
list(books),
[
{"rating": 5.0, "oldest": 57},
{"rating": 4.0, "oldest": 57},
{"rating": 3.0, "oldest": 45},
{"rating": 4.5, "oldest": 35},
],
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
def test_filtering(self):
p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
Book.objects.create(
name="ExpensiveBook1",
pages=1,
isbn="111",
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1),
)
Book.objects.create(
name="ExpensiveBook2",
pages=1,
isbn="222",
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2),
)
Book.objects.create(
name="ExpensiveBook3",
pages=1,
isbn="333",
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3),
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerysetEqual(
publishers,
[
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
[
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
num_books__isnull=True
)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = (
Book.objects.annotate(num_authors=Count("authors__name"))
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerysetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name,
)
authors = (
Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerysetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains="Norvig")
b = Book.objects.get(name__contains="Done Right")
b.authors.add(a)
b.save()
vals = (
Book.objects.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = (
Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
)
.exclude(earliest_book=None)
.order_by("earliest_book")
.values(
"earliest_book",
"num_awards",
"id",
"name",
)
)
self.assertEqual(
list(publishers),
[
{
"earliest_book": datetime.date(1991, 10, 15),
"num_awards": 9,
"id": self.p4.id,
"name": "Morgan Kaufmann",
},
{
"earliest_book": datetime.date(1995, 1, 15),
"num_awards": 7,
"id": self.p3.id,
"name": "Prentice Hall",
},
{
"earliest_book": datetime.date(2007, 12, 6),
"num_awards": 3,
"id": self.p1.id,
"name": "Apress",
},
{
"earliest_book": datetime.date(2008, 3, 3),
"num_awards": 1,
"id": self.p2.id,
"name": "Sams",
},
],
)
vals = Store.objects.aggregate(
Max("friday_night_closing"), Min("original_opening")
)
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
)
def test_annotate_values_list(self):
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("isbn")
)
self.assertEqual(list(books), [("159059725",)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age")
)
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = (
Book.objects.values_list("price")
.annotate(count=Count("price"))
.order_by("-count", "price")
)
self.assertEqual(
list(books),
[
(Decimal("29.69"), 2),
(Decimal("23.09"), 1),
(Decimal("30"), 1),
(Decimal("75"), 1),
(Decimal("82.8"), 1),
],
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
        QuerySet with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates(
"pubdate", "year"
)
self.assertSequenceEqual(
dates,
[
datetime.date(1991, 1, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 1, 1),
datetime.date(2008, 1, 1),
],
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
        # The value doesn't matter; we just need any negative constraint on
        # a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
select_related() stuff.
"""
qs = (
Book.objects.select_for_update()
.order_by("pk")
.select_related("publisher")
.annotate(max_pk=Max("pk"))
)
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg("max_pk"))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"].lower()
self.assertNotIn("for update", qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r"order by (\w+)", qstr),
[", ".join(f[1][0] for f in forced_ordering).lower()],
)
else:
self.assertNotIn("order by", qstr)
self.assertEqual(qstr.count(" join "), 0)
def test_decimal_max_digits_has_no_effect(self):
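        # The summed total (99999.80) has more digits than a single price
        # value allows; the aggregate's output is not constrained by the
        # field's max_digits.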
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i),
name="none",
pages=10,
rating=4.0,
price=9999.98,
contact=a1,
publisher=p1,
pubdate=thedate,
)
book = Book.objects.aggregate(price_sum=Sum("price"))
self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_annotation_expressions(self):
authors = Author.objects.annotate(
combined_ages=Sum(F("age") + F("friends__age"))
).order_by("name")
authors2 = Author.objects.annotate(
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerysetEqual(
qs,
[
("Adrian Holovaty", 132),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 129),
("James Bennett", 63),
("Jeffrey Forcier", 128),
("Paul Bissex", 120),
("Peter Norvig", 103),
("Stuart Russell", 103),
("Wesley J. Chun", 176),
],
lambda a: (a.name, a.combined_ages),
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price")))[
"avg_price"
]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
def test_combine_different_types(self):
msg = (
"Cannot infer type of '+' expression involving these types: FloatField, "
"DecimalField. You must set output_field."
)
qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
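        # Re-evaluating the queryset raises the same error; the failure is
        # not cached away.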
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Author.objects.annotate(Sum(F("age") + F("friends__age")))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum("age") / Count("age"))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
age = qs.aggregate(max_combined_age=Max("combined_ages"))
self.assertEqual(age["max_combined_age"], 176)
age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age=Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age"], 954)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
def test_values_annotation_with_expression(self):
        # Ensure the F() is promoted to the GROUP BY clause.
qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["another_age"], 68)
qs = qs.annotate(friend_count=Count("friends"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["friend_count"], 2)
qs = (
qs.annotate(combined_age=Sum("age") + F("friends__age"))
.filter(name="Adrian Holovaty")
.order_by("-combined_age")
)
self.assertEqual(
list(qs),
[
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 69,
},
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 63,
},
],
)
vals = qs.values("name", "combined_age")
self.assertEqual(
list(vals),
[
{"name": "Adrian Holovaty", "combined_age": 69},
{"name": "Adrian Holovaty", "combined_age": 63},
],
)
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_aggregate_over_aggregate(self):
msg = "Cannot compute Avg('age'): 'age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Author.objects.annotate(age_alias=F("age"),).aggregate(
age=Sum(F("age")),
avg_age=Avg(F("age")),
)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(
FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
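        # MyMax drops its extra source expressions only at SQL-compile time,
        # so the aggregate-over-aggregate check still sees "id__max" and
        # fires first.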
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(
FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Book.objects.aggregate(MyMax("pages", "price"))
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Book.objects.annotate(MyMax("pages", "price"))
Book.objects.aggregate(max_field=MyMax("pages", "price"))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {
"function": self.function.lower(),
"expressions": sql,
"distinct": "",
}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra["function"] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {"function": "MAX", "expressions": "2", "distinct": ""}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, "as_" + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("MAX("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
def test_expression_on_aggregation(self):
qs = (
Publisher.objects.annotate(
price_or_median=Greatest(
Avg("book__rating", output_field=DecimalField()), Avg("book__price")
)
)
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
rating_or_num_awards=Greatest(
Avg("book__rating"), F("num_awards"), output_field=FloatField()
)
)
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(FloatField()), "True"])
):
Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
def test_aggregation_subquery_annotation(self):
"""Subquery annotations are excluded from the GROUP BY if they are
not explicitly grouped against."""
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
).annotate(count=Count("book"))
with self.assertNumQueries(1) as ctx:
list(publisher_qs)
self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
# The GROUP BY should not be by alias either.
self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
def test_aggregation_subquery_annotation_exists(self):
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
count=Count("book"),
)
self.assertTrue(publisher_qs.exists())
def test_aggregation_filter_exists(self):
publishers_having_more_than_one_book_qs = (
Book.objects.values("publisher")
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
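        # Inspect the compiled EXISTS query directly: it must keep a single
        # GROUP BY (on publisher) so the HAVING filter on cnt still applies.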
query = publishers_having_more_than_one_book_qs.query.exists(
using=connection.alias
)
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
def test_aggregation_exists_annotation(self):
published_books = Book.objects.filter(publisher=OuterRef("pk"))
publisher_qs = Publisher.objects.annotate(
published_book=Exists(published_books),
count=Count("book"),
).values_list("name", flat=True)
self.assertCountEqual(
list(publisher_qs),
[
"Apress",
"Morgan Kaufmann",
"Jonno's House of Books",
"Prentice Hall",
"Sams",
],
)
def test_aggregation_subquery_annotation_values(self):
"""
Subquery annotations and external aliases are excluded from the GROUP
BY if they are not selected.
"""
books_qs = (
Book.objects.annotate(
first_author_the_same_age=Subquery(
Author.objects.filter(
age=OuterRef("contact__friends__age"),
)
.order_by("age")
.values("id")[:1],
)
)
.filter(
publisher=self.p1,
first_author_the_same_age__isnull=False,
)
.annotate(
min_age=Min("contact__friends__age"),
)
.values("name", "min_age")
.order_by("name")
)
self.assertEqual(
list(books_qs),
[
{"name": "Practical Django Projects", "min_age": 34},
{
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"min_age": 29,
},
],
)
def test_aggregation_subquery_annotation_values_collision(self):
books_rating_qs = Book.objects.filter(
publisher=OuterRef("pk"),
price=Decimal("29.69"),
).values("rating")
publisher_qs = (
Publisher.objects.filter(
book__contact__age__gt=20,
name=self.p1.name,
)
.annotate(
rating=Subquery(books_rating_qs),
contacts_count=Count("book__contact"),
)
.values("rating")
.annotate(total_count=Count("rating"))
)
self.assertEqual(
list(publisher_qs),
[
{"rating": 4.0, "total_count": 2},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_multivalued(self):
"""
Subquery annotations must be included in the GROUP BY if they use
potentially multivalued relations (contain the LOOKUP_SEP).
"""
subquery_qs = Author.objects.filter(
pk=OuterRef("pk"),
book__name=OuterRef("book__name"),
).values("pk")
author_qs = Author.objects.annotate(
subquery_id=Subquery(subquery_qs),
).annotate(count=Count("book"))
self.assertEqual(author_qs.count(), Author.objects.count())
def test_aggregation_order_by_not_selected_annotation_values(self):
result_asc = [
self.b4.pk,
self.b3.pk,
self.b1.pk,
self.b2.pk,
self.b5.pk,
self.b6.pk,
]
result_desc = result_asc[::-1]
tests = [
("min_related_age", result_asc),
("-min_related_age", result_desc),
(F("min_related_age"), result_asc),
(F("min_related_age").asc(), result_asc),
(F("min_related_age").desc(), result_desc),
]
for ordering, expected_result in tests:
with self.subTest(ordering=ordering):
books_qs = (
Book.objects.annotate(
min_age=Min("authors__age"),
)
.annotate(
min_related_age=Coalesce("min_age", "contact__age"),
)
.order_by(ordering)
.values_list("pk", flat=True)
)
self.assertEqual(list(books_qs), expected_result)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_subquery_annotation(self):
"""
Subquery annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_count_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
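        # A wrapped Subquery, a queryset, and a raw Query are all valid
        # grouping expressions.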
groups = [
Subquery(long_books_count_qs),
long_books_count_qs,
long_books_count_qs.query,
]
for group in groups:
with self.subTest(group=group.__class__.__name__):
long_books_count_breakdown = Publisher.objects.values_list(
group,
).annotate(total=Count("*"))
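                # One publisher has no books over 400 pages (None); the other
                # four have exactly one each.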
self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_exists_annotation(self):
"""
Exists annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_qs = Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=800,
)
has_long_books_breakdown = Publisher.objects.values_list(
Exists(long_books_qs),
).annotate(total=Count("*"))
self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_related_field(self):
publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
book = Book.objects.create(
isbn="159059999",
name="Test book.",
pages=819,
rating=2.5,
price=Decimal("14.44"),
contact=self.a9,
publisher=publisher,
pubdate=datetime.date(2019, 12, 6),
)
book.authors.add(self.a5, self.a6, self.a7)
books_qs = (
Book.objects.annotate(
contact_publisher=Subquery(
Publisher.objects.filter(
pk=OuterRef("publisher"),
name=OuterRef("contact__name"),
).values("name")[:1],
)
)
.filter(
contact_publisher__isnull=False,
)
.annotate(count=Count("authors"))
)
self.assertSequenceEqual(books_qs, [book])
# FIXME: GROUP BY doesn't need to include a subquery with
# non-multivalued JOINs, see Col.possibly_multivalued (refs #31150):
# with self.assertNumQueries(1) as ctx:
# self.assertSequenceEqual(books_qs, [book])
# self.assertEqual(ctx[0]['sql'].count('SELECT'), 2)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
publisher_with_same_name = Publisher.objects.filter(
id__in=Subquery(
Publisher.objects.filter(
name=OuterRef(OuterRef("publisher__name")),
).values("id"),
),
).values(publisher_count=Count("id"))[:1]
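        # The nested OuterRef resolves two levels up, against the annotated
        # Book queryset.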
books_breakdown = Book.objects.annotate(
publisher_count=Subquery(publisher_with_same_name),
authors_count=Count("authors"),
).values_list("publisher_count", flat=True)
self.assertSequenceEqual(books_breakdown, [1] * 6)
    def test_aggregation_exists_multivalued_outerref(self):
self.assertCountEqual(
Publisher.objects.annotate(
books_exists=Exists(
Book.objects.filter(publisher=OuterRef("book__publisher"))
),
books_count=Count("book"),
),
Publisher.objects.all(),
)
def test_filter_in_subquery_or_aggregation(self):
"""
        Filtering against an aggregate requires the use of the HAVING clause.
        If such a filter is combined with a non-aggregate one through OR, the
        latter must also be moved to the HAVING clause and have its grouping
        columns added to the GROUP BY.
        When this is done with a subquery, the specialized logic that groups
        by the outer reference columns must be used instead of the subquery
        itself, since the latter might return multiple rows.
"""
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertQuerysetEqual(authors, Author.objects.all(), ordered=False)
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 1),
("Jacob Kaplan-Moss", 1),
("Brad Dayley", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Wesley J. Chun", 1),
("Stuart Russell", 1),
("Peter Norvig", 2),
],
lambda a: (a.name, a.contact_count),
ordered=False,
)
def test_empty_result_optimization(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=Count("book"),
),
{
"sum_awards": None,
"books_count": 0,
},
)
# Expression without empty_result_set_value forces queries to be
# executed even if they would return an empty result set.
raw_books_count = Func("book", function="COUNT")
raw_books_count.contains_aggregate = True
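        # contains_aggregate=True is what lets aggregate() accept the raw Func.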
with self.assertNumQueries(1):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=raw_books_count,
),
{
"sum_awards": None,
"books_count": 0,
},
)
def test_coalesced_empty_result_set(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), 0),
)["sum_awards"],
0,
)
# Multiple expressions.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), None, 0),
)["sum_awards"],
0,
)
# Nested coalesce.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
)["sum_awards"],
0,
)
# Expression coalesce.
with self.assertNumQueries(1):
self.assertIsInstance(
Store.objects.none().aggregate(
latest_opening=Coalesce(
Max("original_opening"),
RawSQL("CURRENT_TIMESTAMP", []),
),
)["latest_opening"],
datetime.datetime,
)
def test_aggregation_default_unsupported_by_count(self):
msg = "Count does not allow default."
with self.assertRaisesMessage(TypeError, msg):
Count("age", default=0)
def test_aggregation_default_unset(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age"),
)
self.assertIsNone(result["value"])
def test_aggregation_default_zero(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=0),
)
self.assertEqual(result["value"], 0)
def test_aggregation_default_integer(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=21),
)
self.assertEqual(result["value"], 21)
def test_aggregation_default_expression(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=Value(5) * Value(7)),
)
self.assertEqual(result["value"], 35)
def test_aggregation_default_group_by(self):
qs = (
Publisher.objects.values("name")
.annotate(
books=Count("book"),
pages=Sum("book__pages", default=0),
)
.filter(books=0)
)
self.assertSequenceEqual(
qs,
[{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
)
def test_aggregation_default_compound_expression(self):
# Scale rating to a percentage; default to 50% if no books published.
formula = Avg("book__rating", default=2.5) * 20.0
queryset = Publisher.objects.annotate(rating=formula).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "rating"),
[
{"name": "Apress", "rating": 85.0},
{"name": "Jonno's House of Books", "rating": 50.0},
{"name": "Morgan Kaufmann", "rating": 100.0},
{"name": "Prentice Hall", "rating": 80.0},
{"name": "Sams", "rating": 60.0},
],
)
def test_aggregation_default_using_time_from_python(self):
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=datetime.time(17),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, TimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_time_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=TimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_date_from_python(self):
expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateField())
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{
"name": "Jonno's House of Books",
"earliest_pubdate": datetime.date(1970, 1, 1),
},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_date_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min("book__pubdate", default=TruncDate(NowUTC()))
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_datetime_from_python(self):
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=datetime.datetime(1970, 1, 1),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateTimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": datetime.datetime(1970, 1, 1),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_datetime_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=DateTimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": now.replace(
minute=0, second=0, microsecond=0, tzinfo=None
),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_duration_from_python(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=datetime.timedelta(0)),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_duration_from_database(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=Now() - Now()),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_decimal_from_python(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Decimal("0.00")),
)
self.assertEqual(result["value"], Decimal("0.00"))
def test_aggregation_default_using_decimal_from_database(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Pi()),
)
self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
def test_aggregation_default_passed_another_aggregate(self):
result = Book.objects.aggregate(
value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
)
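        # No book is rated below 3.0, so the Sum is None and the default applies.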
self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
def test_aggregation_default_after_annotation(self):
result = Publisher.objects.annotate(
double_num_awards=F("num_awards") * 2,
).aggregate(value=Sum("double_num_awards", default=0))
self.assertEqual(result["value"], 40)
def test_aggregation_default_not_in_aggregate(self):
result = Publisher.objects.annotate(
avg_rating=Avg("book__rating", default=2.5),
).aggregate(Sum("num_awards"))
self.assertEqual(result["num_awards__sum"], 20)
def test_exists_none_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.none()),
)
self.assertEqual(len(qs), 6)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "aggregation_author"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.aggregate(**{crafted_alias: Avg("age")})
def test_exists_extra_where_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.extra(where=["1=0"])),
)
self.assertEqual(len(qs), 6)
import logging
import os
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.db import (
IntegrityError,
connection,
connections,
models,
router,
transaction,
)
from django.forms import (
CharField,
EmailField,
Form,
IntegerField,
ValidationError,
formset_factory,
)
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.testcases import DatabaseOperationForbidden
from django.test.utils import (
CaptureQueriesContext,
TestContextDecorator,
ignore_warnings,
isolate_apps,
override_settings,
setup_test_environment,
)
from django.urls import NoReverseMatch, path, reverse, reverse_lazy
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY311
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
def _assert_skipping(self, func, expected_exc, msg=None):
try:
if msg is not None:
with self.assertRaisesMessage(expected_exc, msg):
func()
else:
with self.assertRaises(expected_exc):
func()
except unittest.SkipTest:
self.fail("%s should not result in a skipped test." % func.__name__)
def test_skip_unless_db_feature(self):
"""
Testing the django.test.skipUnlessDBFeature decorator.
"""
        # Total hack, but it works; we just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
@skipUnlessDBFeature("notprovided")
def test_func2():
raise ValueError
@skipUnlessDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipUnlessDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
self._assert_skipping(test_func, ValueError)
self._assert_skipping(test_func2, unittest.SkipTest)
self._assert_skipping(test_func3, ValueError)
self._assert_skipping(test_func4, unittest.SkipTest)
class SkipTestCase(SimpleTestCase):
@skipUnlessDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipUnlessDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase%s) "
"as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
# Python 3.11 uses fully qualified test name in the output.
% (".test_foo" if PY311 else ""),
)
def test_skip_if_db_feature(self):
"""
Testing the django.test.skipIfDBFeature decorator.
"""
@skipIfDBFeature("__class__")
def test_func():
raise ValueError
@skipIfDBFeature("notprovided")
def test_func2():
raise ValueError
@skipIfDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipIfDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
@skipIfDBFeature("notprovided", "notprovided")
def test_func5():
raise ValueError
self._assert_skipping(test_func, unittest.SkipTest)
self._assert_skipping(test_func2, ValueError)
self._assert_skipping(test_func3, unittest.SkipTest)
self._assert_skipping(test_func4, unittest.SkipTest)
self._assert_skipping(test_func5, ValueError)
class SkipTestCase(SimpleTestCase):
@skipIfDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipIfDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase%s) "
"as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
# Python 3.11 uses fully qualified test name in the output.
% (".test_foo" if PY311 else ""),
)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests("test_dummy"))
try:
test_suite.addTest(SkippedTests("test_will_be_skipped"))
test_suite.addTest(SkippedTestsSubclass("test_will_be_skipped"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised here.")
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 3)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], "Database has feature(s) __class__")
self.assertEqual(result.skipped[1][1], "Database has feature(s) __class__")
def test_missing_default_databases(self):
@skipIfDBFeature("missing")
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases("test_assertion_error"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
with self.assertRaises(ValueError):
self.assertNumQueries(2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name="test")
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
@unittest.skipUnless(
connection.vendor != "sqlite" or not connection.is_in_memory_db(),
"For SQLite in-memory tests, closing the connection destroys the database.",
)
class AssertNumQueriesUponConnectionTests(TransactionTestCase):
available_apps = []
def test_ignores_connection_configuration_queries(self):
real_ensure_connection = connection.ensure_connection
connection.close()
def make_configuration_query():
is_opening_connection = connection.connection is None
real_ensure_connection()
if is_opening_connection:
# Avoid infinite recursion. Creating a cursor calls
# ensure_connection() which is currently mocked by this method.
with connection.cursor() as cursor:
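                    # bare_select_suffix appends e.g. " FROM DUAL" on Oracle.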
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
ensure_connection = (
"django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection"
)
with mock.patch(ensure_connection, side_effect=make_configuration_query):
with self.assertNumQueries(1):
list(Car.objects.all())
class AssertQuerysetEqualTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name="p1")
cls.p2 = Person.objects.create(name="p2")
def test_empty(self):
self.assertQuerysetEqual(Person.objects.filter(name="p3"), [])
def test_ordered(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[self.p1, self.p2],
)
def test_unordered(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"), [self.p2, self.p1], ordered=False
)
def test_queryset(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
Person.objects.order_by("name"),
)
def test_flat_values_list(self):
self.assertQuerysetEqual(
Person.objects.order_by("name").values_list("name", flat=True),
["p1", "p2"],
)
def test_transform(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk,
)
def test_repr_transform(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[repr(self.p1), repr(self.p2)],
transform=repr,
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = (
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerysetEqual(
Person.objects.all(),
[self.p1, self.p2],
)
# No error for one value.
self.assertQuerysetEqual(Person.objects.filter(name="p1"), [self.p1])
def test_repeated_values(self):
"""
        assertQuerysetEqual checks the number of appearances of each item
        when used with the ordered=False option.
"""
batmobile = Car.objects.create(name="Batmobile")
k2000 = Car.objects.create(name="K 2000")
PossessedCar.objects.bulk_create(
[
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
]
)
with self.assertRaises(AssertionError):
self.assertQuerysetEqual(
self.p1.cars.all(), [batmobile, k2000], ordered=False
)
self.assertQuerysetEqual(
self.p1.cars.all(), [batmobile] * 2 + [k2000] * 4, ordered=False
)
def test_maxdiff(self):
names = ["Joe Smith %s" % i for i in range(20)]
Person.objects.bulk_create([Person(name=name) for name in names])
names.append("Extra Person")
with self.assertRaises(AssertionError) as ctx:
self.assertQuerysetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
self.assertIn("Set self.maxDiff to None to see it.", str(ctx.exception))
original = self.maxDiff
self.maxDiff = None
try:
with self.assertRaises(AssertionError) as ctx:
self.assertQuerysetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
finally:
self.maxDiff = original
exception_msg = str(ctx.exception)
self.assertNotIn("Set self.maxDiff to None to see it.", exception_msg)
for name in names:
self.assertIn(name, exception_msg)
@override_settings(ROOT_URLCONF="test_utils.urls")
class CaptureQueriesContextManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.person_pk = str(Person.objects.create(name="test").pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
self.assertIn(self.person_pk, captured_queries[1]["sql"])
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesContextManagerTests(TestCase):
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
msg = "1 != 2 : 1 queries executed, 2 expected\nCaptured queries were:\n1."
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNumQueries(2):
Person.objects.count()
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
def test_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed(template_name="template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_nested_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/include.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/extends.html"):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/alternative.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateNotUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
def test_not_used(self):
with self.assertTemplateNotUsed("template_used/base.html"):
pass
with self.assertTemplateNotUsed("template_used/alternative.html"):
pass
def test_error_message(self):
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name="template_used/base.html"):
pass
msg2 = (
"Template 'template_used/base.html' was not a template used to render "
"the response. Actual template(s) used: template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg2):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
with self.assertRaisesMessage(
AssertionError, "No templates used to render the response"
):
response = self.client.get("/test_utils/no_template_used/")
self.assertTemplateUsed(response, "template_used/base.html")
def test_msg_prefix(self):
msg_prefix = "Prefix"
msg = f"{msg_prefix}: No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
template_name="template_used/base.html",
msg_prefix=msg_prefix,
):
pass
msg = (
f"{msg_prefix}: Template 'template_used/base.html' was not a "
f"template used to render the response. Actual template(s) used: "
f"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
render_to_string("template_used/alternative.html")
def test_count(self):
with self.assertTemplateUsed("template_used/base.html", count=2):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
msg = (
"Template 'template_used/base.html' was expected to be rendered "
"3 time(s) but was actually rendered 2 time(s)."
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html", count=3):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_failure(self):
msg = "response and/or template_name argument must be provided"
with self.assertRaisesMessage(TypeError, msg):
with self.assertTemplateUsed():
pass
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
render_to_string("template_used/base.html")
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name=""):
pass
msg = (
"Template 'template_used/base.html' was not a template used to "
"render the response. Actual template(s) used: "
"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
def test_assert_used_on_http_response(self):
response = HttpResponse()
msg = "%s() is only usable on responses fetched using the Django test Client."
with self.assertRaisesMessage(ValueError, msg % "assertTemplateUsed"):
self.assertTemplateUsed(response, "template.html")
with self.assertRaisesMessage(ValueError, msg % "assertTemplateNotUsed"):
self.assertTemplateNotUsed(response, "template.html")
class HTMLEqualTests(SimpleTestCase):
def test_html_parser(self):
element = parse_html("<div><p>Hello</p></div>")
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, "p")
self.assertEqual(element.children[0].children[0], "Hello")
parse_html("<p>")
parse_html("<p attr>")
dom = parse_html("<p>foo")
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, "p")
self.assertEqual(dom[0], "foo")
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html(
"""
<script>
var js_sha_link='<p>***</p>';
</script>
"""
)
        # Script content is parsed as text.
dom = parse_html(
"""
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
"""
)
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = [
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
# Deprecated tags
"frame",
"spacer",
]
for tag in self_closing_tags:
with self.subTest(tag):
dom = parse_html("<p>Hello <%s> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
dom = parse_html("<p>Hello <%s /> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
def test_simple_equal_html(self):
self.assertHTMLEqual("", "")
self.assertHTMLEqual("<p></p>", "<p></p>")
self.assertHTMLEqual("<p></p>", " <p> </p> ")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div><p>Hello</p></div>")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div> <p>Hello</p> </div>")
self.assertHTMLEqual("<div>\n<p>Hello</p></div>", "<div><p>Hello</p></div>\n")
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual("<p>Hello World !</p>", "<p>Hello World\n\n!</p>")
self.assertHTMLEqual("<p> </p>", "<p></p>")
self.assertHTMLEqual("<p/>", "<p></p>")
self.assertHTMLEqual("<p />", "<p></p>")
self.assertHTMLEqual("<input checked>", '<input checked="checked">')
self.assertHTMLEqual("<p>Hello", "<p> Hello")
self.assertHTMLEqual("<p>Hello</p>World", "<p>Hello</p> World")
def test_ignore_comments(self):
self.assertHTMLEqual(
"<div>Hello<!-- this is a comment --> World!</div>",
"<div>Hello World!</div>",
)
def test_unequal_html(self):
self.assertHTMLNotEqual("<p>Hello</p>", "<p>Hello!</p>")
self.assertHTMLNotEqual("<p>foobar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo bar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo nbsp</p>", "<p>foo </p>")
self.assertHTMLNotEqual("<p>foo #20</p>", "<p>foo </p>")
self.assertHTMLNotEqual(
"<p><span>Hello</span><span>World</span></p>",
"<p><span>Hello</span>World</p>",
)
self.assertHTMLNotEqual(
"<p><span>Hello</span>World</p>",
"<p><span>Hello</span><span>World</span></p>",
)
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />', '<input id="id_name" type="text" />'
)
self.assertHTMLEqual(
"""<input type='text' id="id_name" />""",
'<input id="id_name" type="text" />',
)
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />',
)
def test_class_attribute(self):
pairs = [
('<p class="foo bar"></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class="foo\tbar"></p>', '<p class="bar foo"></p>'),
('<p class="\tfoo\tbar\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t\t\tfoo\t\t\tbar\t\t\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t \nfoo \t\nbar\n\t "></p>', '<p class="bar foo"></p>'),
]
for html1, html2 in pairs:
with self.subTest(html1):
self.assertHTMLEqual(html1, html2)
def test_boolean_attribute(self):
html1 = "<input checked>"
html2 = '<input checked="">'
html3 = '<input checked="checked">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLEqual(html1, html3)
self.assertHTMLEqual(html2, html3)
self.assertHTMLNotEqual(html1, '<input checked="invalid">')
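        # parse_html() normalizes boolean attributes to their bare form.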
self.assertEqual(str(parse_html(html1)), "<input checked>")
self.assertEqual(str(parse_html(html2)), "<input checked>")
self.assertEqual(str(parse_html(html3)), "<input checked>")
    def test_non_boolean_attributes(self):
html1 = "<input value>"
html2 = '<input value="">'
html3 = '<input value="value">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLNotEqual(html1, html3)
self.assertEqual(str(parse_html(html1)), '<input value="">')
self.assertEqual(str(parse_html(html2)), '<input value="">')
def test_normalize_refs(self):
pairs = [
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
]
for pair in pairs:
with self.subTest(repr(pair)):
self.assertHTMLEqual(*pair)
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""", # NOQA
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""",
)
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""",
"""
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""",
)
def test_html_contain(self):
        # Equal HTML fragments contain each other.
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html("<div><p>foo</p></div>")
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn("<p>foo</p>", dom2)
self.assertIn("foo", dom2)
# when a root element is used ...
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertIn(dom2, dom1)
def test_count(self):
# equal html contains each other one time
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo foo</p><p>foo</p>")
self.assertEqual(dom2.count("foo"), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count("bar"), 0)
self.assertEqual(dom2.count("class"), 0)
self.assertEqual(dom2.count("p"), 0)
self.assertEqual(dom2.count("o"), 2)
dom2 = parse_html("<p>foo</p><p>foo</p>")
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<div><div><p>foo</p></div></div>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>foo</p></p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>bar</p></p>")
self.assertEqual(dom2.count(dom1), 0)
# HTML with a root element contains the same HTML with no root element.
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 1)
# Target of search is a sequence of child elements and appears more
# than once.
dom2 = parse_html("<div><p>foo</p><p>bar</p><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 2)
# Searched HTML has additional children.
dom1 = parse_html("<a/><b/>")
dom2 = parse_html("<a/><b/><c/>")
self.assertEqual(dom2.count(dom1), 1)
# No match found in children.
dom1 = parse_html("<b/><a/>")
self.assertEqual(dom2.count(dom1), 0)
# Target of search found among children and grandchildren.
dom1 = parse_html("<b/><b/>")
dom2 = parse_html("<a><b/><b/></a><b/><b/>")
self.assertEqual(dom2.count(dom1), 2)
def test_root_element_escaped_html(self):
html = "<br>"
parsed = parse_html(html)
self.assertEqual(str(parsed), html)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual("<p>", "")
with self.assertRaises(AssertionError):
self.assertHTMLEqual("", "<p>")
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual("< div></ div>", "<div></div>")
with self.assertRaises(HTMLParseError):
parse_html("</p>")
def test_escaped_html_errors(self):
msg = "<p>\n<foo>\n</p> != <p>\n<foo>\n</p>\n"
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
def test_contains_html(self):
response = HttpResponse(
"""<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>"""
)
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse("""<body <bad>>""")
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, "<p></p>")
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse(
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>'
)
self.assertContains(
response,
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>',
html=True,
)
class JSONEqualTests(SimpleTestCase):
def test_simple_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_unordered(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz", "attr1": "foo"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(json1, json2)
def test_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONEqual(valid_json, invalid_json)
def test_simple_not_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
self.assertJSONNotEqual(json1, json2)
def test_simple_not_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(json1, json2)
def test_not_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(valid_json, invalid_json)
class XMLEqualTests(SimpleTestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raises_message(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
msg = """{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
""".format(
xml1=repr(xml1), xml2=repr(xml2)
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
        xml_invalid = "<elem attr1='a attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(xml_invalid, xml2)
def test_comment_root(self):
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_with_leading_or_trailing_whitespace(self):
xml1 = "<elem>foo</elem> \t\n"
xml2 = " \t\n<elem>foo</elem>"
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal_with_whitespace_in_the_middle(self):
xml1 = "<elem>foo</elem><elem>bar</elem>"
xml2 = "<elem>foo</elem> <elem>bar</elem>"
self.assertXMLNotEqual(xml1, xml2)
def test_doctype_root(self):
xml1 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example1.dtd"><root />'
xml2 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example2.dtd"><root />'
self.assertXMLEqual(xml1, xml2)
def test_processing_instruction(self):
xml1 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example1.com"?><root />'
)
xml2 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example2.com"?><root />'
)
self.assertXMLEqual(xml1, xml2)
self.assertXMLEqual(
'<?xml-stylesheet href="style1.xslt" type="text/xsl"?><root />',
'<?xml-stylesheet href="style2.xslt" type="text/xsl"?><root />',
)
class SkippingExtraTests(TestCase):
fixtures = ["should_not_be_loaded.json"]
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries; it should be zero.
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
class AssertWarnsMessageTests(SimpleTestCase):
def test_context_manager(self):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Expected message", UserWarning)
def test_context_manager_failure(self):
msg = "Expected message' not found in 'Unexpected message'"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Unexpected message", UserWarning)
def test_callable(self):
def func():
warnings.warn("Expected message", UserWarning)
self.assertWarnsMessage(UserWarning, "Expected message", func)
def test_special_re_chars(self):
def func1():
warnings.warn("[.*x+]y?", UserWarning)
with self.assertWarnsMessage(UserWarning, "[.*x+]y?"):
func1()
# TODO: Remove when dropping support for PY39.
class AssertNoLogsTest(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
logging.config.dictConfig(DEFAULT_LOGGING)
cls.addClassCleanup(logging.config.dictConfig, settings.LOGGING)
def setUp(self):
self.logger = logging.getLogger("django")
@override_settings(DEBUG=True)
def test_fails_when_log_emitted(self):
msg = "Unexpected logs found: ['INFO:django:FAIL!']"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django", "INFO"):
self.logger.info("FAIL!")
@override_settings(DEBUG=True)
def test_text_level(self):
with self.assertNoLogs("django", "INFO"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_int_level(self):
with self.assertNoLogs("django", logging.INFO):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_default_level(self):
with self.assertNoLogs("django"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_does_not_hide_other_failures(self):
msg = "1 != 2"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django"):
self.assertEqual(1, 2)
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
error_invalid = ["Enter a valid email address."]
self.assertFieldOutput(
EmailField, {"[email protected]": "[email protected]"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": error_invalid + ["Another error"]},
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField, {"[email protected]": "Wrong output"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": ["Come on, gimme some well formatted data, dude."]},
)
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
"required": "This is really required.",
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertURLEqualTests(SimpleTestCase):
def test_equal(self):
valid_tests = (
("http://example.com/?", "http://example.com/"),
("http://example.com/?x=1&", "http://example.com/?x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
(
"http://example.com/?x=1&y=2&a=1&a=2",
"http://example.com/?a=1&a=2&y=2&x=1",
),
("/path/to/?x=1&y=2&z=3", "/path/to/?z=3&y=2&x=1"),
("?x=1&y=2&z=3", "?z=3&y=2&x=1"),
("/test_utils/no_template_used/", reverse_lazy("no_template_used")),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
("http://example.com/", "https://example.com/"),
("http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"),
("http://example.com/?x=1&y=bar&x=2", "https://example.com/?y=bar&x=2&x=1"),
# Parameters of the same name must be in the same order.
("/path/to?a=1&a=2", "/path/to/?a=2&a=1"),
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"
)
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2",
"https://example.com/?x=2&x=1",
msg_prefix="Prefix: ",
)
class TestForm(Form):
field = CharField()
def clean_field(self):
value = self.cleaned_data.get("field", "")
if value == "invalid":
raise ValidationError("invalid value")
return value
def clean(self):
if self.cleaned_data.get("field") == "invalid_non_field":
raise ValidationError("non-field error")
return self.cleaned_data
@classmethod
def _get_cleaned_form(cls, field_value):
form = cls({"field": field_value})
form.full_clean()
return form
@classmethod
def valid(cls):
return cls._get_cleaned_form("valid")
@classmethod
def invalid(cls, nonfield=False):
return cls._get_cleaned_form("invalid_non_field" if nonfield else "invalid")
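# Formset counterpart of TestForm, with helpers that return bound
# valid/invalid formsets.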
class TestFormset(formset_factory(TestForm)):
@classmethod
def _get_cleaned_formset(cls, field_value):
formset = cls(
{
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-field": field_value,
}
)
formset.full_clean()
return formset
@classmethod
def valid(cls):
return cls._get_cleaned_formset("valid")
@classmethod
def invalid(cls, nonfield=False, nonform=False):
if nonform:
formset = cls({}, error_messages={"missing_management_form": "error"})
formset.full_clean()
return formset
return cls._get_cleaned_formset("invalid_non_field" if nonfield else "invalid")
class AssertFormErrorTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango50Warning)
def test_non_client_response(self):
msg = (
"assertFormError() is only usable on responses fetched using the "
"Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormError(response, "form", "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
response,
"form",
"field",
"invalid value",
msg_prefix=msg_prefix,
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_form_not_in_context(self):
msg = "The form 'form' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
response, "form", "field", "invalid value", msg_prefix=msg_prefix
)
def test_single_error(self):
self.assertFormError(TestForm.invalid(), "field", "invalid value")
def test_error_list(self):
self.assertFormError(TestForm.invalid(), "field", ["invalid value"])
def test_empty_errors_valid_form(self):
self.assertFormError(TestForm.valid(), "field", [])
def test_empty_errors_valid_form_non_field_errors(self):
self.assertFormError(TestForm.valid(), None, [])
def test_field_not_in_form(self):
msg = (
"The form <TestForm bound=True, valid=False, fields=(field)> does not "
"contain the field 'other_field'."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm.invalid(), "other_field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(),
"other_field",
"invalid value",
msg_prefix=msg_prefix,
)
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=True, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.valid(), "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.valid(), "field", "invalid value", msg_prefix=msg_prefix
)
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.invalid(), "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(), "field", "other error", msg_prefix=msg_prefix
)
def test_unbound_form(self):
msg = (
"The form <TestForm bound=False, valid=Unknown, fields=(field)> is not "
"bound, it will never have any errors."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm(), "field", [])
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(TestForm(), "field", [], msg_prefix=msg_prefix)
def test_empty_errors_invalid_form(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.invalid(), "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
self.assertFormError(TestForm.invalid(nonfield=True), None, "non-field error")
def test_different_non_field_errors(self):
msg = (
"The non-field errors of form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(
TestForm.invalid(nonfield=True), None, "other non-field error"
)
self.assertIn(
"['non-field error'] != ['other non-field error']", str(ctx.exception)
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(nonfield=True),
None,
"other non-field error",
msg_prefix=msg_prefix,
)
class AssertFormsetErrorTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango50Warning)
def test_non_client_response(self):
msg = (
"assertFormsetError() is only usable on responses fetched using "
"the Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_formset_not_in_context(self):
msg = "The formset 'formset' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
response, "formset", 0, "field", "invalid value", msg_prefix=msg_prefix
)
def test_single_error(self):
self.assertFormsetError(TestFormset.invalid(), 0, "field", "invalid value")
def test_error_list(self):
self.assertFormsetError(TestFormset.invalid(), 0, "field", ["invalid value"])
def test_empty_errors_valid_formset(self):
self.assertFormsetError(TestFormset.valid(), 0, "field", [])
def test_multiple_forms(self):
formset = TestFormset(
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-field": "valid",
"form-1-field": "invalid",
}
)
formset.full_clean()
self.assertFormsetError(formset, 0, "field", [])
self.assertFormsetError(formset, 1, "field", ["invalid value"])
def test_field_not_in_form(self):
msg = (
"The form 0 of formset <TestFormset: bound=True valid=False total_forms=1> "
"does not contain the field 'other_field'."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(
TestFormset.invalid(), 0, "other_field", "invalid value"
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(),
0,
"other_field",
"invalid value",
msg_prefix=msg_prefix,
)
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=True total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(TestFormset.valid(), 0, "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.valid(), 0, "field", "invalid value", msg_prefix=msg_prefix
)
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(TestFormset.invalid(), 0, "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(), 0, "field", "other error", msg_prefix=msg_prefix
)
def test_unbound_formset(self):
msg = (
"The formset <TestFormset: bound=False valid=Unknown total_forms=1> is not "
"bound, it will never have any errors."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(TestFormset(), 0, "field", [])
def test_empty_errors_invalid_formset(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(TestFormset.invalid(), 0, "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
self.assertFormsetError(
TestFormset.invalid(nonfield=True), 0, None, "non-field error"
)
def test_different_non_field_errors(self):
msg = (
"The non-field errors of form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(
TestFormset.invalid(nonfield=True), 0, None, "other non-field error"
)
self.assertIn(
"['non-field error'] != ['other non-field error']", str(ctx.exception)
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(nonfield=True),
0,
None,
"other non-field error",
msg_prefix=msg_prefix,
)
def test_no_non_field_errors(self):
msg = (
"The non-field errors of form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(TestFormset.invalid(), 0, None, "non-field error")
self.assertIn("[] != ['non-field error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(), 0, None, "non-field error", msg_prefix=msg_prefix
)
def test_non_form_errors(self):
self.assertFormsetError(TestFormset.invalid(nonform=True), None, None, "error")
def test_different_non_form_errors(self):
msg = (
"The non-form errors of formset <TestFormset: bound=True valid=False "
"total_forms=0> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(
TestFormset.invalid(nonform=True), None, None, "other error"
)
self.assertIn("['error'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(nonform=True),
None,
None,
"other error",
msg_prefix=msg_prefix,
)
def test_no_non_form_errors(self):
msg = (
"The non-form errors of formset <TestFormset: bound=True valid=False "
"total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(TestFormset.invalid(), None, None, "error")
self.assertIn("[] != ['error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormsetError(
TestFormset.invalid(),
None,
None,
"error",
msg_prefix=msg_prefix,
)
def test_non_form_errors_with_field(self):
msg = "You must use field=None with form_index=None."
with self.assertRaisesMessage(ValueError, msg):
self.assertFormsetError(
TestFormset.invalid(nonform=True), None, "field", "error"
)
def test_form_index_too_big(self):
msg = (
"The formset <TestFormset: bound=True valid=False total_forms=1> only has "
"1 form."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(TestFormset.invalid(), 2, "field", "error")
def test_form_index_too_big_plural(self):
formset = TestFormset(
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-field": "valid",
"form-1-field": "valid",
}
)
formset.full_clean()
msg = (
"The formset <TestFormset: bound=True valid=True total_forms=2> only has 2 "
"forms."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(formset, 2, "field", "error")
# RemovedInDjango50Warning
class AssertFormErrorDeprecationTests(SimpleTestCase):
"""
Exhaustively test all possible combinations of args/kwargs for the old
signature.
"""
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_form_error_errors_none(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm.invalid(), "field", None)
def test_assert_form_error_errors_none_warning(self):
msg = (
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormError(TestForm.valid(), "field", None)
def _assert_form_error_old_api_cases(self, form, field, errors, msg_prefix):
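        # Every positional/keyword split of the old
        # (response, form, field, errors, msg_prefix) signature.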
response = mock.Mock(context=[{"form": TestForm.invalid()}])
return (
((response, form, field, errors), {}),
((response, form, field, errors, msg_prefix), {}),
((response, form, field, errors), {"msg_prefix": msg_prefix}),
((response, form, field), {"errors": errors}),
((response, form, field), {"errors": errors, "msg_prefix": msg_prefix}),
((response, form), {"field": field, "errors": errors}),
(
(response, form),
{"field": field, "errors": errors, "msg_prefix": msg_prefix},
),
((response,), {"form": form, "field": field, "errors": errors}),
(
(response,),
{
"form": form,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(),
{"response": response, "form": form, "field": field, "errors": errors},
),
(
(),
{
"response": response,
"form": form,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
)
def test_assert_form_error_old_api(self):
deprecation_msg = (
"Passing response to assertFormError() is deprecated. Use the form object "
"directly: assertFormError(response.context['form'], 'field', ...)"
)
for args, kwargs in self._assert_form_error_old_api_cases(
form="form",
field="field",
errors=["invalid value"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertWarnsMessage(RemovedInDjango50Warning, deprecation_msg):
self.assertFormError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_form_error_old_api_assertion_error(self):
for args, kwargs in self._assert_form_error_old_api_cases(
form="form",
field="field",
errors=["other error"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertRaises(AssertionError):
self.assertFormError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_formset_error_errors_none(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(TestFormset.invalid(), 0, "field", None)
def test_assert_formset_error_errors_none_warning(self):
msg = (
"Passing errors=None to assertFormsetError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormsetError(TestFormset.valid(), 0, "field", None)
def _assert_formset_error_old_api_cases(
self, formset, form_index, field, errors, msg_prefix
):
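        # Every positional/keyword split of the old
        # (response, formset, form_index, field, errors, msg_prefix) signature.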
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
return (
((response, formset, form_index, field, errors), {}),
((response, formset, form_index, field, errors, msg_prefix), {}),
(
(response, formset, form_index, field, errors),
{"msg_prefix": msg_prefix},
),
((response, formset, form_index, field), {"errors": errors}),
(
(response, formset, form_index, field),
{"errors": errors, "msg_prefix": msg_prefix},
),
((response, formset, form_index), {"field": field, "errors": errors}),
(
(response, formset, form_index),
{"field": field, "errors": errors, "msg_prefix": msg_prefix},
),
(
(response, formset),
{"form_index": form_index, "field": field, "errors": errors},
),
(
(response, formset),
{
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(response,),
{
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
},
),
(
(response,),
{
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(),
{
"response": response,
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
},
),
(
(),
{
"response": response,
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
)
def test_assert_formset_error_old_api(self):
deprecation_msg = (
"Passing response to assertFormsetError() is deprecated. Use the formset "
"object directly: assertFormsetError(response.context['formset'], 0, ...)"
)
for args, kwargs in self._assert_formset_error_old_api_cases(
formset="formset",
form_index=0,
field="field",
errors=["invalid value"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertWarnsMessage(RemovedInDjango50Warning, deprecation_msg):
self.assertFormsetError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_formset_error_old_api_assertion_error(self):
for args, kwargs in self._assert_formset_error_old_api_cases(
formset="formset",
form_index=0,
field="field",
errors=["other error"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertRaises(AssertionError):
self.assertFormsetError(*args, **kwargs)
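# Minimal URLconf modules used to check that overriding ROOT_URLCONF clears
# the URL resolver cache between tests.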
class FirstUrls:
urlpatterns = [path("first/", empty_response, name="first")]
class SecondUrls:
urlpatterns = [path("second/", empty_response, name="second")]
class SetupTestEnvironmentTests(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(
RuntimeError, "setup_test_environment() was already called"
):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
allowed_hosts = type_("*")
with mock.patch("django.test.utils._TestState") as x:
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ["*", "testserver"])
class OverrideSettingsTests(SimpleTestCase):
# #21518 -- If neither override_settings nor a setting_changed receiver
    # clears the URL cache between tests, then one of test_urlconf_first or
    # test_urlconf_second will fail.
@override_settings(ROOT_URLCONF=FirstUrls)
def test_urlconf_first(self):
reverse("first")
@override_settings(ROOT_URLCONF=SecondUrls)
def test_urlconf_second(self):
reverse("second")
def test_urlconf_cache(self):
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=FirstUrls):
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=SecondUrls):
with self.assertRaises(NoReverseMatch):
reverse("first")
self.client.get(reverse("second"))
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
def test_override_media_root(self):
"""
Overriding the MEDIA_ROOT setting should be reflected in the
base_location attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_ROOT="test_value"):
self.assertEqual(default_storage.base_location, "test_value")
def test_override_media_url(self):
"""
Overriding the MEDIA_URL setting should be reflected in the
base_url attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_URL="/test_value/"):
self.assertEqual(default_storage.base_url, "/test_value/")
def test_override_file_upload_permissions(self):
"""
Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
the file_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.file_permissions_mode, 0o644)
with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
self.assertEqual(default_storage.file_permissions_mode, 0o777)
def test_override_file_upload_directory_permissions(self):
"""
Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
reflected in the directory_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.directory_permissions_mode)
with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
self.assertEqual(default_storage.directory_permissions_mode, 0o777)
def test_override_database_routers(self):
"""
Overriding DATABASE_ROUTERS should update the base router.
"""
test_routers = [object()]
with self.settings(DATABASE_ROUTERS=test_routers):
self.assertEqual(router.routers, test_routers)
def test_override_static_url(self):
"""
Overriding the STATIC_URL setting should be reflected in the
base_url attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_URL="/test/"):
self.assertEqual(staticfiles_storage.base_url, "/test/")
def test_override_static_root(self):
"""
Overriding the STATIC_ROOT setting should be reflected in the
location attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_ROOT="/tmp/test"):
self.assertEqual(staticfiles_storage.location, os.path.abspath("/tmp/test"))
def test_override_staticfiles_storage(self):
"""
Overriding the STATICFILES_STORAGE setting should be reflected in
the value of django.contrib.staticfiles.storage.staticfiles_storage.
"""
new_class = "ManifestStaticFilesStorage"
new_storage = "django.contrib.staticfiles.storage." + new_class
with self.settings(STATICFILES_STORAGE=new_storage):
self.assertEqual(staticfiles_storage.__class__.__name__, new_class)
def test_override_staticfiles_finders(self):
"""
Overriding the STATICFILES_FINDERS setting should be reflected in
the return value of django.contrib.staticfiles.finders.get_finders.
"""
current = get_finders()
self.assertGreater(len(list(current)), 1)
finders = ["django.contrib.staticfiles.finders.FileSystemFinder"]
with self.settings(STATICFILES_FINDERS=finders):
self.assertEqual(len(list(get_finders())), len(finders))
def test_override_staticfiles_dirs(self):
"""
Overriding the STATICFILES_DIRS setting should be reflected in
the locations attribute of the
django.contrib.staticfiles.finders.FileSystemFinder instance.
"""
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
test_path = "/tmp/test"
expected_location = ("", test_path)
self.assertNotIn(expected_location, finder.locations)
with self.settings(STATICFILES_DIRS=[test_path]):
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
self.assertIn(expected_location, finder.locations)
@skipUnlessDBFeature("supports_transactions")
class TestBadSetUpTestData(TestCase):
"""
An exception in setUpTestData() shouldn't leak a transaction which would
cascade across the rest of the test suite.
"""
class MyException(Exception):
pass
@classmethod
def setUpClass(cls):
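        # setUpTestData() raises MyException inside super().setUpClass();
        # record whether the test transaction was left open afterwards.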
try:
super().setUpClass()
except cls.MyException:
cls._in_atomic_block = connection.in_atomic_block
@classmethod
    def tearDownClass(cls):
# override to avoid a second cls._rollback_atomics() which would fail.
# Normal setUpClass() methods won't have exception handling so this
# method wouldn't typically be run.
pass
@classmethod
def setUpTestData(cls):
# Simulate a broken setUpTestData() method.
raise cls.MyException()
def test_failure_in_setUpTestData_should_rollback_transaction(self):
# setUpTestData() should call _rollback_atomics() so that the
# transaction doesn't leak.
self.assertFalse(self._in_atomic_block)
@skipUnlessDBFeature("supports_transactions")
class CaptureOnCommitCallbacksTests(TestCase):
databases = {"default", "other"}
callback_called = False
def enqueue_callback(self, using="default"):
def hook():
self.callback_called = True
transaction.on_commit(hook, using=using)
def test_no_arguments(self):
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_using(self):
with self.captureOnCommitCallbacks(using="other") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_different_using(self):
with self.captureOnCommitCallbacks(using="default") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(callbacks, [])
def test_execute(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
def test_pre_callback(self):
def pre_hook():
pass
transaction.on_commit(pre_hook, using="default")
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertNotEqual(callbacks[0], pre_hook)
def test_with_rolled_back_savepoint(self):
with self.captureOnCommitCallbacks() as callbacks:
try:
with transaction.atomic():
self.enqueue_callback()
raise IntegrityError
except IntegrityError:
# Inner transaction.atomic() has been rolled back.
pass
self.assertEqual(callbacks, [])
def test_execute_recursive(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(self.enqueue_callback)
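        # Running the first callback enqueues a second one; execute=True
        # keeps draining until no new callbacks are registered.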
self.assertEqual(len(callbacks), 2)
self.assertIs(self.callback_called, True)
def test_execute_tree(self):
"""
A visualisation of the callback tree tested. Each node is expected to
be visited only once:
└─branch_1
├─branch_2
│ ├─leaf_1
│ └─leaf_2
└─leaf_3
"""
branch_1_call_counter = 0
branch_2_call_counter = 0
leaf_1_call_counter = 0
leaf_2_call_counter = 0
leaf_3_call_counter = 0
def leaf_1():
nonlocal leaf_1_call_counter
leaf_1_call_counter += 1
def leaf_2():
nonlocal leaf_2_call_counter
leaf_2_call_counter += 1
def leaf_3():
nonlocal leaf_3_call_counter
leaf_3_call_counter += 1
def branch_1():
nonlocal branch_1_call_counter
branch_1_call_counter += 1
transaction.on_commit(branch_2)
transaction.on_commit(leaf_3)
def branch_2():
nonlocal branch_2_call_counter
branch_2_call_counter += 1
transaction.on_commit(leaf_1)
transaction.on_commit(leaf_2)
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(branch_1)
self.assertEqual(branch_1_call_counter, 1)
self.assertEqual(branch_2_call_counter, 1)
self.assertEqual(leaf_1_call_counter, 1)
self.assertEqual(leaf_2_call_counter, 1)
self.assertEqual(leaf_3_call_counter, 1)
self.assertEqual(callbacks, [branch_1, branch_2, leaf_3, leaf_1, leaf_2])
def test_execute_robust(self):
class MyException(Exception):
pass
def hook():
self.callback_called = True
raise MyException("robust callback")
with self.assertLogs("django.test", "ERROR") as cm:
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(hook, robust=True)
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
log_record = cm.records[0]
self.assertEqual(
log_record.getMessage(),
"Error calling CaptureOnCommitCallbacksTests.test_execute_robust.<locals>."
"hook in on_commit() (robust callback).",
)
self.assertIsNotNone(log_record.exc_info)
raised_exception = log_record.exc_info[1]
self.assertIsInstance(raised_exception, MyException)
self.assertEqual(str(raised_exception), "robust callback")
class DisallowedDatabaseQueriesTests(SimpleTestCase):
def test_disallowed_database_connections(self):
expected_message = (
"Database connections to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.connect()
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.temporary_connection()
def test_disallowed_database_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
Car.objects.first()
def test_disallowed_database_chunked_cursor_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
next(Car.objects.iterator())
class AllowedDatabaseQueriesTests(SimpleTestCase):
databases = {"default"}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
class DatabaseAliasTests(SimpleTestCase):
def setUp(self):
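        # Tests overwrite the class-level "databases" attribute; restore the
        # original value afterwards.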
self.addCleanup(setattr, self.__class__, "databases", self.databases)
def test_no_close_match(self):
self.__class__.databases = {"void"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is "
"not defined in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {"defualt"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which "
"is not defined in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {"default", "other"}
self.assertEqual(self._validate_databases(), frozenset({"default", "other"}))
def test_all(self):
self.__class__.databases = "__all__"
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps("test_utils", attr_name="class_apps")
class IsolatedAppsTests(SimpleTestCase):
def test_installed_apps(self):
self.assertEqual(
[app_config.label for app_config in self.class_apps.get_app_configs()],
["test_utils"],
)
def test_class_decoration(self):
class ClassDecoration(models.Model):
pass
self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps("test_utils") as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
class DoNothingDecorator(TestContextDecorator):
def enable(self):
pass
def disable(self):
pass
class TestContextDecoratorTests(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, "disable")
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError("reraised")
decorator = DoNothingDecorator()
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, "reraised"):
decorated_test_class.setUp()
decorated_test_class.doCleanups()
self.assertTrue(mock_disable.called)
def test_cleanups_run_after_tearDown(self):
calls = []
class SaveCallsDecorator(TestContextDecorator):
def enable(self):
calls.append("enable")
def disable(self):
calls.append("disable")
class AddCleanupInSetUp(unittest.TestCase):
def setUp(self):
calls.append("setUp")
self.addCleanup(lambda: calls.append("cleanup"))
decorator = SaveCallsDecorator()
decorated_test_class = decorator.__call__(AddCleanupInSetUp)()
decorated_test_class.setUp()
decorated_test_class.tearDown()
decorated_test_class.doCleanups()
self.assertEqual(calls, ["enable", "setUp", "cleanup", "disable"])
|
4758318b6d178c11a341af3e35f4dfa4c69dfe49e8039dcd60753227f8f8f616 | import datetime
import os
import re
import unittest
from unittest import mock
from urllib.parse import parse_qsl, urljoin, urlparse
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib import admin
from django.contrib.admin import AdminSite, ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import ADDITION, DELETION, LogEntry
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME, get_permission_codename
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.db import connection
from django.forms.utils import ErrorList
from django.template.response import TemplateResponse
from django.test import (
TestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import override_script_prefix
from django.urls import NoReverseMatch, resolve, reverse
from django.utils import formats, translation
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from . import customadmin
from .admin import CityAdmin, site, site2
from .models import (
Actor,
AdminOrderedAdminMethod,
AdminOrderedCallable,
AdminOrderedField,
AdminOrderedModelMethod,
Album,
Answer,
Answer2,
Article,
BarAccount,
Book,
Bookmark,
Box,
Category,
Chapter,
ChapterXtra1,
ChapterXtra2,
Character,
Child,
Choice,
City,
Collector,
Color,
ComplexSortedPerson,
CoverLetter,
CustomArticle,
CyclicOne,
CyclicTwo,
DooHickey,
Employee,
EmptyModel,
Fabric,
FancyDoodad,
FieldOverridePost,
FilteredManager,
FooAccount,
FoodDelivery,
FunkyTag,
Gallery,
Grommet,
Inquisition,
Language,
Link,
MainPrepopulated,
Media,
ModelWithStringPrimaryKey,
OtherStory,
Paper,
Parent,
ParentWithDependentChildren,
ParentWithUUIDPK,
Person,
Persona,
Picture,
Pizza,
Plot,
PlotDetails,
PluggableSearchPerson,
Podcast,
Post,
PrePopulatedPost,
Promo,
Question,
ReadablePizza,
ReadOnlyPizza,
ReadOnlyRelatedField,
Recommendation,
Recommender,
RelatedPrepopulated,
RelatedWithUUIDPKModel,
Report,
Restaurant,
RowLevelChangePermissionModel,
SecretHideout,
Section,
ShortMessage,
Simple,
Song,
State,
Story,
SuperSecretHideout,
SuperVillain,
Telegram,
TitleTranslation,
Topping,
Traveler,
UnchangeableObject,
UndeletableObject,
UnorderedObject,
UserProxy,
Villain,
Vodcast,
Whatsit,
Widget,
Worker,
WorkHour,
)
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
MULTIPART_ENCTYPE = 'enctype="multipart/form-data"'
def make_aware_datetimes(dt, iana_key):
"""Makes one aware datetime for each supported time zone provider."""
yield dt.replace(tzinfo=zoneinfo.ZoneInfo(iana_key))
if pytz is not None:
yield pytz.timezone(iana_key).localize(dt, is_dst=None)
class AdminFieldExtractionMixin:
"""
Helper methods for extracting data from AdminForm.
"""
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
fields = []
for fieldset in response.context["adminform"]:
for field_line in fieldset:
fields.extend(field_line)
return fields
def get_admin_readonly_fields(self, response):
"""
Return the readonly fields for the response's AdminForm.
"""
return [f for f in self.get_admin_form_fields(response) if f.is_readonly]
def get_admin_readonly_field(self, response, field_name):
"""
Return the readonly field for the given field_name.
"""
admin_readonly_fields = self.get_admin_readonly_fields(response)
for field in admin_readonly_fields:
if field.field["name"] == field_name:
return field
@override_settings(ROOT_URLCONF="admin_views.urls", USE_I18N=True, LANGUAGE_CODE="en")
class AdminViewBasicTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 1",
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 2",
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.color1 = Color.objects.create(value="Red", warm=True)
cls.color2 = Color.objects.create(value="Orange", warm=True)
cls.color3 = Color.objects.create(value="Blue", warm=False)
cls.color4 = Color.objects.create(value="Green", warm=False)
cls.fab1 = Fabric.objects.create(surface="x")
cls.fab2 = Fabric.objects.create(surface="y")
cls.fab3 = Fabric.objects.create(surface="plain")
cls.b1 = Book.objects.create(name="Book 1")
cls.b2 = Book.objects.create(name="Book 2")
cls.pro1 = Promo.objects.create(name="Promo 1", book=cls.b1)
cls.pro1 = Promo.objects.create(name="Promo 2", book=cls.b2)
cls.chap1 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b1
)
cls.chap3 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b2
)
cls.chap4 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b2
)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra="ChapterXtra1 1")
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra="ChapterXtra1 2")
Actor.objects.create(name="Palin", age=27)
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
            # There is no title in the database; give one or the formset will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
def setUp(self):
self.client.force_login(self.superuser)
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(
response.content.index(text1.encode()),
response.content.index(text2.encode()),
(failing_msg or "")
+ "\nResponse:\n"
+ response.content.decode(response.charset),
)
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
        If you leave off the trailing slash, the app should redirect and add it.
"""
add_url = reverse("admin:admin_views_article_add")
response = self.client.get(add_url[:-1])
self.assertRedirects(response, add_url, status_code=301)
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get(
reverse("admin:admin_views_section_add"), {"name": "My Section"}
)
self.assertContains(
response,
'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response",
)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
GET on the change_view (when passing a string as the PK argument for a
model with an integer PK field) redirects to the index page with a
message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(quote("abc/<b>"),)),
follow=True,
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["section with ID “abc/<b>” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_edit_GET_old_url_redirect(self):
"""
The change URL changed in Django 1.9, but the old one still redirects.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)).replace(
"change/", ""
)
)
self.assertRedirects(
response, reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
def test_basic_inheritance_GET_string_PK(self):
"""
GET on the change_view (for inherited models) redirects to the index
page with a message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_supervillain_change", args=("abc",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["super villain with ID “abc” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_section_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""HTTP response from a popup is properly escaped."""
post_data = {
IS_POPUP_VAR: "1",
"title": "title with a new\nline",
"content": "some content",
"date_0": "2010-09-10",
"date_1": "14:55:39",
}
response = self.client.post(reverse("admin:admin_views_article_add"), post_data)
self.assertContains(response, "title with a new\\nline")
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
url = reverse("admin:admin_views_section_change", args=(self.s1.pk,))
response = self.client.post(url, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as_delete_inline(self):
"""
Should be able to "Save as new" while also deleting an inline.
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-2-DELETE": "1",
"article_set-3-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302)
# started with 3 articles, one was deleted.
self.assertEqual(Section.objects.latest("id").article_set.count(), 2)
def test_change_list_column_field_classes(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
# callables display the callable name.
self.assertContains(response, "column-callable_year")
self.assertContains(response, "field-callable_year")
        # lambdas display as "lambda" plus their index in list_display.
self.assertContains(response, "column-lambda8")
self.assertContains(response, "field-lambda8")
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_property(self):
"""
Sort on a list_display field that is a property (column 10 is
a property in Article model).
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 10}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on property are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on property are out of order.",
)
def test_change_list_sorting_callable_query_expression(self):
"""Query expressions may be used for admin_order_field."""
tests = [
("order_by_expression", 9),
("order_by_f_expression", 12),
("order_by_orderby_expression", 13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_callable_query_expression_reverse(self):
tests = [
("order_by_expression", -9),
("order_by_f_expression", -12),
("order_by_orderby_expression", -13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-3"}
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on Model method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on Model method are out of order.",
)
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "4"}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
td = '<td class="field-model_property_year">%s</td>'
td_2000, td_2008, td_2009 = td % 2000, td % 2008, td % 2009
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "6"}
)
self.assertContentBefore(
response,
td_2009,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2000,
"Results of sorting on ModelAdmin method are out of order.",
)
# Let's make sure the ordering is right and that we don't get a
# FieldError when we change to descending order
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-6"}
)
self.assertContentBefore(
response,
td_2000,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2009,
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
# Sort by name, gender
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "1.2"}
)
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "-2.1"}
)
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
        changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
response = self.client.get(reverse("admin:admin_views_person_changelist"), {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso="ur", name="Urdu")
l2 = Language.objects.create(iso="ar", name="Arabic")
link1 = reverse("admin:admin_views_language_change", args=(quote(l1.pk),))
link2 = reverse("admin:admin_views_language_change", args=(quote(l2.pk),))
response = self.client.get(reverse("admin:admin_views_language_changelist"), {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get(
reverse("admin:admin_views_language_changelist"), {"o": "-1"}
)
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_podcast_change", args=(p2.pk,))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# The changelist displays the correct columns if two columns correspond
# to the same ordering field.
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(quote(p1.pk),))
link2 = reverse("admin:admin_views_podcast_change", args=(quote(p2.pk),))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse("admin:admin_views_complexsortedperson_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_complexsortedperson_change", args=(p2.pk,))
response = self.client.get(
reverse("admin:admin_views_complexsortedperson_changelist"), {}
)
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, "Name")
self.assertContains(response, "Colored name")
# Check order
self.assertContentBefore(response, "Name", "Colored name")
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
The admin shows default sort indicators for all kinds of 'ordering'
fields: field names, method on the model admin and model itself, and
other callables. See #17252.
"""
models = [
(AdminOrderedField, "adminorderedfield"),
(AdminOrderedModelMethod, "adminorderedmodelmethod"),
(AdminOrderedAdminMethod, "adminorderedadminmethod"),
(AdminOrderedCallable, "adminorderedcallable"),
]
for model, url in models:
model.objects.create(stuff="The Last Item", order=3)
model.objects.create(stuff="The First Item", order=1)
model.objects.create(stuff="The Middle Item", order=2)
response = self.client.get(
reverse("admin:admin_views_%s_changelist" % url), {}
)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(
response.context["cl"].get_ordering_field_columns(), {2: "asc"}
)
# Check order of records.
self.assertContentBefore(response, "The First Item", "The Middle Item")
self.assertContentBefore(response, "The Middle Item", "The Last Item")
def test_has_related_field_in_list_display_fk(self):
"""Joins shouldn't be performed for <FK>_id fields in list display."""
state = State.objects.create(name="Karnataka")
City.objects.create(state=state, name="Bangalore")
response = self.client.get(reverse("admin:admin_views_city_changelist"), {})
response.context["cl"].list_display = ["id", "name", "state"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["id", "name", "state_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_has_related_field_in_list_display_o2o(self):
"""Joins shouldn't be performed for <O2O>_id fields in list display."""
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["media_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_limited_filter(self):
"""
Admin changelist filters do not contain objects excluded via
limit_choices_to.
"""
response = self.client.get(reverse("admin:admin_views_thing_changelist"))
self.assertContains(
response,
'<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view",
)
self.assertNotContains(
response,
'<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to",
)
def test_relation_spanning_filters(self):
changelist_url = reverse("admin:admin_views_chapterxtra1_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, '<div id="changelist-filter">')
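        # Map each relation-spanning filter to its candidate values and a
        # predicate used to verify every object in the filtered queryset.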
filters = {
"chap__id__exact": {
"values": [c.id for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.id == value,
},
"chap__title": {
"values": [c.title for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.title == value,
},
"chap__book__id__exact": {
"values": [b.id for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.id == value,
},
"chap__book__name": {
"values": [b.name for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.name == value,
},
"chap__book__promo__id__exact": {
"values": [p.id for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
id=value
).exists(),
},
"chap__book__promo__name": {
"values": [p.name for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
name=value
).exists(),
},
# A forward relation (book) after a reverse relation (promo).
"guest_author__promo__book__id__exact": {
"values": [p.id for p in Book.objects.all()],
"test": lambda obj, value: obj.guest_author.promo_set.filter(
book=value
).exists(),
},
}
for filter_path, params in filters.items():
for value in params["values"]:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s"' % query_string)
# ensure link works
filtered_response = self.client.get(
"%s?%s" % (changelist_url, query_string)
)
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context["cl"].queryset.all():
self.assertTrue(params["test"](obj, value))
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
changelist_url = reverse("admin:admin_views_thing_changelist")
response = self.client.get(changelist_url, {"notarealfield": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get(changelist_url, {"notarealfield__whatever": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
response = self.client.get(
changelist_url, {"color__id__exact": "StringNotInteger!"}
)
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Regression test for #18530
response = self.client.get(changelist_url, {"pub_date__gte": "foo"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(
title="I Could Go Anywhere",
content="Versatile",
date=datetime.datetime.now(),
)
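        # The three fixture articles have a section; the one created above
        # does not, so the isnull filters split the four articles 3/1.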
changelist_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, "4 articles")
response = self.client.get(changelist_url, {"section__isnull": "false"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "0"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "true"})
self.assertContains(response, "1 article")
response = self.client.get(changelist_url, {"section__isnull": "1"})
self.assertContains(response, "1 article")
def test_logout_and_password_change_URLs(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(
response,
'<form id="logout-form" method="post" action="%s">'
% reverse("admin:logout"),
)
self.assertContains(
response, '<a href="%s">' % reverse("admin:password_change")
)
def test_named_group_field_choices_change_list(self):
"""
        The admin changelist shows the correct human-readable values in the
        relevant column for instances of a model whose field defines named
        groups in its 'choices' option.
"""
link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,))
link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,))
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist table isn't showing the right human-readable values "
"set by a model field 'choices' option named group."
)
self.assertContains(
response,
'<a href="%s">Horizontal</a>' % link1,
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="%s">Vertical</a>' % link2,
msg_prefix=fail_msg,
html=True,
)
def test_named_group_field_choices_filter(self):
"""
        The filter UI shows correctly when at least one named group is used
        in the 'choices' option of a model field.
"""
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist filter isn't showing options contained inside a model "
"field 'choices' option named group."
)
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(
response,
'<a href="?surface__exact=x">Horizontal</a>',
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="?surface__exact=y">Vertical</a>',
msg_prefix=fail_msg,
html=True,
)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
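        # A null value in a BooleanField renders the "unknown" icon in the
        # changelist.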
response = self.client.get(reverse("admin:admin_views_post_changelist"))
self.assertContains(response, "icon-unknown.svg")
def test_display_decorator_with_boolean_and_empty_value(self):
msg = (
"The boolean and empty_value arguments to the @display decorator "
"are mutually exclusive."
)
with self.assertRaisesMessage(ValueError, msg):
class BookAdmin(admin.ModelAdmin):
@admin.display(boolean=True, empty_value="(Missing)")
def is_published(self, obj):
return obj.publish_date is not None
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("en-us"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "Choisir une heure")
def test_i18n_language_non_english_fallback(self):
"""
        The fallback language still works properly when the selected
        language cannot be found.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertContains(response, "Choisir une heure")
def test_jsi18n_with_context(self):
response = self.client.get(reverse("admin-extra-context:jsi18n"))
self.assertEqual(response.status_code, 200)
def test_jsi18n_format_fallback(self):
"""
The JavaScript i18n view doesn't return localized date/time formats
when the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="ru"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "%d.%m.%Y %H:%M:%S")
self.assertContains(response, "%Y-%m-%d %H:%M:%S")
def test_disallowed_filtering(self):
with self.assertLogs("django.security.DisallowedModelAdminLookup", "ERROR"):
response = self.client.get(
"%s?owner__email__startswith=fuzzy"
% reverse("admin:admin_views_album_changelist")
)
self.assertEqual(response.status_code, 400)
# Filters are allowed if explicitly included in list_filter
response = self.client.get(
"%s?color__value__startswith=red"
% reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"%s?color__value=red" % reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
# Filters should be allowed if they involve a local field without the
# need to allow them in list_filter or date_hierarchy.
response = self.client.get(
"%s?age__gt=30" % reverse("admin:admin_views_person_changelist")
)
self.assertEqual(response.status_code, 200)
e1 = Employee.objects.create(
name="Anonymous", gender=1, age=22, alive=True, code="123"
)
e2 = Employee.objects.create(
name="Visitor", gender=2, age=19, alive=True, code="124"
)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
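        # The employee filter spans the person_ptr parent link, implying
        # Employee inherits from Person via multi-table inheritance.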
response = self.client.get(reverse("admin:admin_views_workhour_changelist"))
self.assertContains(response, "employee__person_ptr__exact")
response = self.client.get(
"%s?employee__person_ptr__exact=%d"
% (reverse("admin:admin_views_workhour_changelist"), e1.pk)
)
self.assertEqual(response.status_code, 200)
def test_disallowed_to_field(self):
url = reverse("admin:admin_views_section_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "missing_field"})
self.assertEqual(response.status_code, 400)
        # Specifying a field that is not referred to by any other model
        # registered to this admin site should raise an exception.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(
reverse("admin:admin_views_section_changelist"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
# Primary key should always be allowed, even if the referenced model
# isn't registered.
response = self.client.get(
reverse("admin:admin_views_notreferenced_changelist"), {TO_FIELD_VAR: "id"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field referenced by another model through an m2m
        # should be allowed.
response = self.client.get(
reverse("admin:admin_views_recipe_changelist"), {TO_FIELD_VAR: "rname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field referenced through a reverse m2m relationship
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_ingredient_changelist"), {TO_FIELD_VAR: "iname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field that is not referred by any other model directly
# registered to this admin site but registered through inheritance
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyparent_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is only referred to by an inline of a
        # registered model should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyinline_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # #25622 - Specifying a field of a model referred to only by a
        # generic relation should raise DisallowedModelAdminToField.
url = reverse("admin:admin_views_referencedbygenrel_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "object_id"})
self.assertEqual(response.status_code, 400)
# We also want to prevent the add, change, and delete views from
# leaking a disallowed field value.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(
reverse("admin:admin_views_section_add"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
section = Section.objects.create()
url = reverse("admin:admin_views_section_change", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
url = reverse("admin:admin_views_section_delete", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
def test_allowed_filtering_15103(self):
"""
        Regression test for ticket #15103 - filtering on fields defined in a
        ForeignKey 'limit_choices_to' should be allowed, otherwise
        raw_id_fields can break.
"""
# Filters should be allowed if they are defined on a ForeignKey
# pointing to this model.
url = "%s?leader__name=Palin&leader__age=27" % reverse(
"admin:admin_views_inquisition_changelist"
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
        Regression test for ticket #20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get(
"%s?%s" % (reverse("admin:admin_views_actor_changelist"), IS_POPUP_VAR)
)
self.assertContains(response, 'data-popup-opener="%s"' % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username="super")
user.set_unusable_password()
user.save()
self.client.force_login(user)
response = self.client.get(reverse("admin:index"))
self.assertNotContains(
response,
reverse("admin:password_change"),
msg_prefix=(
'The "change password" link should not be displayed if a user does not '
"have a usable password."
),
)
def test_change_view_with_show_delete_extra_context(self):
"""
The 'show_delete' context variable in the admin's change view controls
the display of the delete button.
"""
instance = UndeletableObject.objects.create(name="foo")
response = self.client.get(
reverse("admin:admin_views_undeletableobject_change", args=(instance.pk,))
)
self.assertNotContains(response, "deletelink")
def test_change_view_logs_m2m_field_changes(self):
"""Changes to ManyToManyFields are included in the object's history."""
pizza = ReadablePizza.objects.create(name="Cheese")
cheese = Topping.objects.create(name="cheese")
post_data = {"name": pizza.name, "toppings": [cheese.pk]}
response = self.client.post(
reverse("admin:admin_views_readablepizza_change", args=(pizza.pk,)),
post_data,
)
self.assertRedirects(
response, reverse("admin:admin_views_readablepizza_changelist")
)
pizza_ctype = ContentType.objects.get_for_model(
ReadablePizza, for_concrete_model=False
)
log = LogEntry.objects.filter(
content_type=pizza_ctype, object_id=pizza.pk
).first()
self.assertEqual(log.get_change_message(), "Changed Toppings.")
def test_allows_attributeerror_to_bubble_up(self):
"""
AttributeErrors are allowed to bubble when raised inside a change list
view. Requires a model to be created so there's something to display.
Refs: #16655, #18593, and #18747
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get(reverse("admin:admin_views_simple_changelist"))
def test_changelist_with_no_change_url(self):
"""
        ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if
        the URL for change_view is removed from get_urls() (#20934).
"""
o = UnchangeableObject.objects.create()
response = self.client.get(
reverse("admin:admin_views_unchangeableobject_changelist")
)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(
response, '<th class="field-__str__">%s</th>' % o, html=True
)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = "/test_admin/admin/admin_views/"
confirm_good_url = reverse(
"admin:app_list", kwargs={"app_label": "admin_views"}
)
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", kwargs={"app_label": "this_should_fail"})
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", args=("admin_views2",))
def test_resolve_admin_views(self):
index_match = resolve("/test_admin/admin4/")
list_match = resolve("/test_admin/admin4/auth/user/")
self.assertIs(index_match.func.admin_site, customadmin.simple_site)
self.assertIsInstance(
list_match.func.model_admin, customadmin.CustomPwdTemplateUserAdmin
)
def test_adminsite_display_site_url(self):
"""
        #13749 - The admin should display a 'View site' link to the
        front-end site.
"""
url = reverse("admin:index")
response = self.client.get(url)
self.assertEqual(response.context["site_url"], "/my-site-url/")
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
def test_date_hierarchy_empty_queryset(self):
self.assertIs(Question.objects.exists(), False)
response = self.client.get(reverse("admin:admin_views_answer2_changelist"))
self.assertEqual(response.status_code, 200)
@override_settings(TIME_ZONE="America/Sao_Paulo", USE_TZ=True)
def test_date_hierarchy_timezone_dst(self):
# This datetime doesn't exist in this timezone due to DST.
for date in make_aware_datetimes(
datetime.datetime(2016, 10, 16, 15), "America/Sao_Paulo"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=16")
self.assertContains(response, "question__expires__month=10")
self.assertContains(response, "question__expires__year=2016")
@override_settings(TIME_ZONE="America/Los_Angeles", USE_TZ=True)
def test_date_hierarchy_local_date_differ_from_utc(self):
# This datetime is 2017-01-01 in UTC.
for date in make_aware_datetimes(
datetime.datetime(2016, 12, 31, 16), "America/Los_Angeles"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=31")
self.assertContains(response, "question__expires__month=12")
self.assertContains(response, "question__expires__year=2016")
def test_sortable_by_columns_subset(self):
expected_sortable_fields = ("date", "callable_year")
expected_not_sortable_fields = (
"content",
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
)
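        # Only columns listed in the ModelAdmin's sortable_by should render
        # with the "sortable" CSS class.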
response = self.client.get(reverse("admin6:admin_views_article_changelist"))
for field_name in expected_sortable_fields:
self.assertContains(
response, '<th scope="col" class="sortable column-%s">' % field_name
)
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
def test_get_sortable_by_columns_subset(self):
response = self.client.get(reverse("admin6:admin_views_actor_changelist"))
self.assertContains(response, '<th scope="col" class="sortable column-age">')
self.assertContains(response, '<th scope="col" class="column-name">')
def test_sortable_by_no_column(self):
expected_not_sortable_fields = ("title", "book")
response = self.client.get(reverse("admin6:admin_views_chapter_changelist"))
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_get_sortable_by_no_column(self):
response = self.client.get(reverse("admin6:admin_views_color_changelist"))
self.assertContains(response, '<th scope="col" class="column-value">')
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_app_index_context(self):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
self.assertEqual(response.context["title"], "Admin_Views administration")
self.assertEqual(response.context["app_label"], "admin_views")
# Models are sorted alphabetically by default.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models))
def test_app_index_context_reordered(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
# Models are in reverse order.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models, reverse=True))
def test_change_view_subtitle_per_object(self):
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_view_subtitle_per_object(self):
viewuser = User.objects.create_user(
username="viewuser",
password="secret",
is_staff=True,
)
viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", Article._meta)),
)
self.client.force_login(viewuser)
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_formset_kwargs_can_be_overridden(self):
response = self.client.get(reverse("admin:admin_views_city_add"))
self.assertContains(response, "overridden_name")
def test_render_views_no_subtitle(self):
tests = [
reverse("admin:index"),
reverse("admin:password_change"),
reverse("admin:app_list", args=("admin_views",)),
reverse("admin:admin_views_article_delete", args=(self.a1.pk,)),
reverse("admin:admin_views_article_history", args=(self.a1.pk,)),
]
for url in tests:
with self.subTest(url=url):
with self.assertNoLogs("django.template", "DEBUG"):
self.client.get(url)
# Login must be after logout.
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:logout"))
self.client.get(reverse("admin:login"))
def test_render_delete_selected_confirmation_no_subtitle(self):
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": self.a1.pk,
}
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:admin_views_article_changelist"), post_data)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation."
"NumericPasswordValidator"
)
},
]
)
def test_password_change_helptext(self):
response = self.client.get(reverse("admin:password_change"))
self.assertContains(
response, '<div class="help" id="id_new_password1_helptext">'
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation." "NumericPasswordValidator"
)
},
],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
            # Put this app's templates dir and the shared tests' templates
            # dir in DIRS so they take precedence over the admin's templates
            # dir.
"DIRS": [
os.path.join(os.path.dirname(__file__), "templates"),
os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_custom_model_admin_templates(self):
# Test custom change list template with custom extra context
response = self.client.get(
reverse("admin:admin_views_customarticle_changelist")
)
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, "custom_admin/change_list.html")
# Test custom add form template
response = self.client.get(reverse("admin:admin_views_customarticle_add"))
self.assertTemplateUsed(response, "custom_admin/add_form.html")
# Add an article so we can test delete, change, and history views
post = self.client.post(
reverse("admin:admin_views_customarticle_add"),
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
},
)
self.assertRedirects(
post, reverse("admin:admin_views_customarticle_changelist")
)
self.assertEqual(CustomArticle.objects.count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get(
reverse("admin:admin_views_customarticle_change", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/change_form.html")
response = self.client.get(
reverse("admin:admin_views_customarticle_delete", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/delete_confirmation.html")
response = self.client.post(
reverse("admin:admin_views_customarticle_changelist"),
data={
"index": 0,
"action": ["delete_selected"],
"_selected_action": ["1"],
},
)
self.assertTemplateUsed(
response, "custom_admin/delete_selected_confirmation.html"
)
response = self.client.get(
reverse("admin:admin_views_customarticle_history", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/object_history.html")
# A custom popup response template may be specified by
# ModelAdmin.popup_response_template.
response = self.client.post(
reverse("admin:admin_views_customarticle_add") + "?%s=1" % IS_POPUP_VAR,
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
IS_POPUP_VAR: "1",
},
)
self.assertEqual(response.template_name, "custom_admin/popup_response.html")
def test_extended_bodyclass_template_change_form(self):
"""
The admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_change_password_template(self):
user = User.objects.get(username="super")
response = self.client.get(
reverse("admin:auth_user_password_change", args=(user.id,))
)
        # The auth/user/change_password.html template uses block.super in
        # the bodyclass block.
self.assertContains(response, "bodyclass_consistency_check ")
        # When a site has multiple passwords in the browser's password
        # manager, a browser pop-up asks which user the new password is for.
        # To prevent this, the username is added to the change password form.
self.assertContains(
response, '<input type="text" name="username" value="super" class="hidden">'
)
        # Help text for passwords has an id.
self.assertContains(
response,
'<div class="help" id="id_password1_helptext"><ul><li>'
"Your password can’t be too similar to your other personal information."
"</li><li>Your password can’t be entirely numeric.</li></ul></div>",
)
self.assertContains(
response,
'<div class="help" id="id_password2_helptext">'
"Enter the same password as before, for verification.</div>",
)
def test_extended_bodyclass_template_index(self):
"""
The admin/index.html template uses block.super in the bodyclass block.
"""
response = self.client.get(reverse("admin:index"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_change_list(self):
"""
        The admin/change_list.html template uses block.super in the
        bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_login(self):
"""
The admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get(reverse("admin:login"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_confirmation(self):
"""
The admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get(reverse("admin:auth_group_delete", args=(group.id,)))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
        The admin/delete_selected_confirmation.html template uses
        block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": group.id,
}
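        # Posting the delete_selected action renders the confirmation page
        # rather than deleting immediately.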
response = self.client.post(reverse("admin:auth_group_changelist"), post_data)
self.assertEqual(response.context["site_header"], "Django administration")
self.assertContains(response, "bodyclass_consistency_check ")
def test_filter_with_custom_template(self):
"""
A custom template can be used to render an admin filter.
"""
response = self.client.get(reverse("admin:admin_views_color2_changelist"))
self.assertTemplateUsed(response, "custom_filter_template.html")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_change_form_URL_has_correct_value(self):
"""
change_view has form_url in response.context
"""
response = self.client.get(
reverse(
"admin:admin_views_section_change",
args=(self.s1.pk,),
current_app=self.current_app,
)
)
self.assertIn(
"form_url", response.context, msg="form_url not present in response.context"
)
self.assertEqual(response.context["form_url"], "pony")
def test_initial_data_can_be_overridden(self):
"""
The behavior for setting initial form data can be overridden in the
ModelAdmin class. Usually, the initial value is set via the GET params.
"""
response = self.client.get(
reverse("admin:admin_views_restaurant_add", current_app=self.current_app),
{"name": "test_value"},
)
# this would be the usual behaviour
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behaviour
self.assertContains(response, 'value="overridden_value"')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_js_minified_only_if_debug_is_false(self):
"""
The minified versions of the JS files are only used when DEBUG is False.
"""
with override_settings(DEBUG=False):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertNotContains(response, "vendor/jquery/jquery.js")
self.assertContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
with override_settings(DEBUG=True):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "vendor/jquery/jquery.js")
self.assertNotContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
@override_settings(ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_as_duplication(self):
"""'save as' creates a new person"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
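        # Posting with _saveasnew creates a copy instead of updating per1.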
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)), post_data
)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
new_person = Person.objects.latest("id")
self.assertRedirects(
response, reverse("admin:admin_views_person_change", args=(new_person.pk,))
)
def test_save_as_continue_false(self):
"""
Saving a new object using "Save as new" redirects to the changelist
instead of the change view when ModelAdmin.save_as_continue=False.
"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
url = reverse(
"admin:admin_views_person_change",
args=(self.per1.pk,),
current_app=site2.name,
)
response = self.client.post(url, post_data)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
self.assertRedirects(
response,
reverse("admin:admin_views_person_changelist", current_app=site2.name),
)
def test_save_as_new_with_validation_errors(self):
"""
When you click "Save as new" and have a validation error,
you only see the "Save as new" button and not the other save buttons,
and that only the "Save as" button is visible.
"""
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)),
{
"_saveasnew": "",
"gender": "invalid",
"_addanother": "fail",
},
)
self.assertContains(response, "Please correct the errors below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
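        # The parent name "_invalid" is expected to fail form validation
        # while the inline data stays valid.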
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "Child",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "_invalid",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "_invalid",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "Father",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post(
reverse("admin2:login"),
{
REDIRECT_FIELD_NAME: reverse("admin2:index"),
"username": "customform",
"password": "secret",
},
follow=True,
)
self.assertIsInstance(login, TemplateResponse)
self.assertContains(login, "custom form error")
self.assertContains(login, "path/to/media.css")
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/login.html")
self.assertContains(response, "Hello from a custom login template")
def test_custom_admin_site_logout_template(self):
response = self.client.post(reverse("admin2:logout"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/logout.html")
self.assertContains(response, "Hello from a custom logout template")
def test_custom_admin_site_index_view_and_template(self):
response = self.client.get(reverse("admin2:index"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/index.html")
self.assertContains(response, "Hello from a custom index template *bar*")
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/app_index.html")
self.assertContains(response, "Hello from a custom app_index template")
def test_custom_admin_site_password_change_template(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(
response, "Hello from a custom password change form template"
)
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(response, "eggs")
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get(reverse("admin2:password_change_done"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_done.html")
self.assertContains(
response, "Hello from a custom password change done template"
)
def test_custom_admin_site_view(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:my_view"))
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.force_login(self.superuser)
su = User.objects.get(username="super")
response = self.client.get(
reverse("admin4:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 200)
def get_perm(Model, codename):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model, for_concrete_model=False)
return Permission.objects.get(content_type=ct, codename=codename)
@override_settings(
ROOT_URLCONF="admin_views.urls",
# Test with the admin's documented list of required context processors.
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.nostaffuser = User.objects.create_user(
username="nostaff", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
another_section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
        # Set up permissions for our users who can add, change, and delete.
opts = Article._meta
# User who can view Articles
cls.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", opts))
)
# User who can add Articles
cls.adduser.user_permissions.add(
get_perm(Article, get_permission_codename("add", opts))
)
# User who can change Articles
cls.changeuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
cls.nostaffuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
# User who can delete Articles
cls.deleteuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(Section, get_permission_codename("delete", Section._meta))
)
# login POST dicts
cls.index_url = reverse("admin:index")
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super",
"password": "secret",
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "secret",
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "notsecret",
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "adduser",
"password": "secret",
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "changeuser",
"password": "secret",
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "deleteuser",
"password": "secret",
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "nostaff",
"password": "secret",
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "joepublic",
"password": "secret",
}
cls.viewuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "viewuser",
"password": "secret",
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"password": "secret",
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Test what happens if a user enters an email address as the
        # username.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username="jondoe", password="secret", email="[email protected]")
new_user.save()
        # Ensure the login view doesn't return a 500 when multiple users
        # share the same email address.
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# View User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.viewuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Regular User should not be able to log in.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
self.assertFormError(
login.context["form"], "username", ["This field is required."]
)
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse("admin:login"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
def test_login_has_permission(self):
        # Regular User should not be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.joepublic_login
)
self.assertContains(login, "permission denied")
# User with permissions should be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.nostaff_login
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
        # Staff should be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"),
{
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "deleteuser",
"password": "secret",
},
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = "the-answer=42"
redirect_url = "%s?%s" % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
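        # Pass the redirect target via the query string rather than the POST
        # body.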
login = self.client.post(
"%s?%s" % (reverse("admin:login"), urlencode(new_next)), post_data
)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = "You are authenticated as {}"
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertNotContains(response, hint_template.format(""), status_code=200)
# Non-staff user should be shown the hint
self.client.force_login(self.nostaffuser)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertContains(
response, hint_template.format(self.nostaffuser.username), status_code=200
)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.post(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.post(reverse("admin:logout"))
        # Add user may log in and POST to the add view, then is redirected
        # to the admin root.
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_link = '› <a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix=(
"User restricted to add permission is given link to change list view "
"in breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.post(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix=(
"Unrestricted user is not given link to change list view in "
"breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.post(reverse("admin:logout"))
        # #8509 - If a normal user is already logged in, it is possible to
        # switch to the superuser without error.
self.client.force_login(self.joepublicuser)
        # Make sure that data still persists if the user's session expires.
self.client.force_login(self.superuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
@mock.patch("django.contrib.admin.options.InlineModelAdmin.has_change_permission")
def test_add_view_with_view_only_inlines(self, has_change_permission):
"""User with add permission to a section but view-only for inlines."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("add", Section._meta))
)
self.client.force_login(self.viewuser)
# Valid POST creates a new section.
data = {
"name": "New obj",
"article_set-TOTAL_FORMS": 0,
"article_set-INITIAL_FORMS": 0,
}
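        # Submit an empty inline formset so only the Section itself is
        # created.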
response = self.client.post(reverse("admin:admin_views_section_add"), data)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(Section.objects.latest("id").name, data["name"])
# InlineModelAdmin.has_change_permission()'s obj argument is always
# None during object add.
self.assertEqual(
[obj for (request, obj), _ in has_change_permission.call_args_list],
[None, None],
)
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
change_dict = {
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
article_changelist_url = reverse("admin:admin_views_article_changelist")
        # Add user should not be able to view the list of articles or change
        # any of them.
self.client.force_login(self.adduser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view articles but not make changes.
self.client.force_login(self.viewuser)
response = self.client.get(article_changelist_url)
self.assertContains(
response,
"<title>Select article to view | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to view</h1>")
self.assertEqual(response.context["title"], "Select article to view")
response = self.client.get(article_change_url)
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<label>Extra form field:</label>")
self.assertContains(
response,
'<a href="/test_admin/admin/admin_views/article/" class="closelink">Close'
"</a>",
)
self.assertEqual(response.context["title"], "View article")
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.context["title"], "Select article to change")
self.assertContains(
response,
"<title>Select article to change | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to change</h1>")
response = self.client.get(article_change_url)
self.assertEqual(response.context["title"], "Change article")
self.assertContains(
response,
"<title>Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
)
        # One error in the form should produce the singular error message;
        # multiple errors, the plural.
change_dict["title"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the error below.",
msg_prefix=(
"Singular error message not found in response to post with one error"
),
)
change_dict["content"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the errors below.",
msg_prefix=(
"Plural error message not found in response to post with multiple "
"errors"
),
)
self.client.post(reverse("admin:logout"))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
change_url_1 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
)
change_url_2 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
)
change_url_3 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
)
change_url_6 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
)
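        # Row-level rules exercised below: even ids are changeable, ids
        # divisible by 3 are viewable, and anything else returns a 403.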
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertRedirects(response, self.index_url)
response = self.client.get(change_url_3)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_3, {"name": "changed"})
self.assertEqual(response.status_code, 403)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=3).name,
"odd id mult 3",
)
response = self.client.get(change_url_6)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_6, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
)
self.assertRedirects(response, self.index_url)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_1, {"name": "changed"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertContains(response, "login-form")
response = self.client.get(change_url_2, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_2, {"name": "changed again"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_change_view_without_object_change_permission(self):
"""
The object should be read-only if the user has permission to view it
and change objects of that type but not to change the current object.
"""
change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
self.client.force_login(self.viewuser)
response = self.client.get(change_url)
self.assertEqual(response.context["title"], "View article")
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(
response,
'<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close'
"</a>",
)
def test_change_view_save_as_new(self):
"""
'Save as new' should raise PermissionDenied for users without the 'add'
permission.
"""
change_dict_save_as_new = {
"_saveasnew": "Save as new",
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
# Add user can perform "Save as new".
article_count = Article.objects.count()
self.client.force_login(self.adduser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), article_count + 1)
self.client.logout()
# Change user cannot perform "Save as new" (no 'add' permission).
article_count = Article.objects.count()
self.client.force_login(self.changeuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), article_count)
# User with both add and change permissions should be redirected to the
# change page for the newly created object.
article_count = Article.objects.count()
self.client.force_login(self.superuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(Article.objects.count(), article_count + 1)
new_article = Article.objects.latest("id")
self.assertRedirects(
post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
)
def test_change_view_with_view_only_inlines(self):
"""
User with change permission to a section but view-only for inlines.
"""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
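        # Only the three existing articles appear; no extra blank forms are
        # rendered for view-only inlines.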
self.assertEqual(len(formset.forms), 3)
# Valid POST changes the name.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 3,
"article_set-INITIAL_FORMS": 3,
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
def test_change_view_with_view_only_last_inline(self):
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("view", Section._meta))
)
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# The last inline is not marked as empty.
self.assertContains(response, 'id="article_set-2"')
def test_change_view_with_view_and_add_inlines(self):
"""User has view and add permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
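        # 3 initial forms plus 3 extra blank ones, since the user may add
        # articles.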
self.assertEqual(len(formset.forms), 6)
# Valid POST creates a new article.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-3-id": [""],
"article_set-3-title": ["A title"],
"article_set-3-content": ["Added content"],
"article_set-3-date_0": ["2008-3-18"],
"article_set-3-date_1": ["11:54:58"],
"article_set-3-section": [str(self.s1.pk)],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
self.assertEqual(Article.objects.count(), 4)
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
def test_change_view_with_view_and_delete_inlines(self):
"""User has view and delete permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
# Inline POST details are ignored without delete permission.
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 3)
# Deletion successful when delete permission is added.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", Article._meta))
)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 2)
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))
# add user should not be able to delete articles
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# view user should not be able to delete articles
self.client.force_login(self.viewuser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# Delete user can delete
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
        # The response contains a link to the related Article.
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
post = self.client.post(delete_url, delete_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, str(self.a1.pk))
def test_delete_view_with_no_default_permissions(self):
"""
The delete view allows users to delete collected objects without a
'delete' permission (ReadOnlyPizza.Meta.default_permissions is empty).
"""
pizza = ReadOnlyPizza.objects.create(name="Double Cheese")
delete_url = reverse("admin:admin_views_readonlypizza_delete", args=(pizza.pk,))
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/readonlypizza/%s/" % pizza.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Read only pizzas: 1</li>")
post = self.client.post(delete_url, {"post": "yes"})
self.assertRedirects(
post, reverse("admin:admin_views_readonlypizza_changelist")
)
self.assertEqual(ReadOnlyPizza.objects.count(), 0)
def test_delete_view_nonexistent_obj(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_article_delete", args=("nonexistent",))
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “nonexistent” doesn’t exist. Perhaps it was deleted?"],
)
def test_history_view(self):
"""History view should restrict access."""
        # The add user shouldn't be able to view the article list or change any articles.
self.client.force_login(self.adduser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view all items
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_history_view_bad_url(self):
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=("foo",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “foo” doesn’t exist. Perhaps it was deleted?"],
)
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
self.client.force_login(self.adduser)
# The user can't add sections yet, so they shouldn't see the "add section" link.
url = reverse("admin:admin_views_article_add")
add_link_text = "add_id_section"
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the user to add sections too. Now they can see the "add section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("add", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_conditionally_show_change_section_link(self):
"""
The foreign key widget should only show the "change related" button if
the user has permission to change that related item.
"""
def get_change_related(response):
return (
response.context["adminform"]
.form.fields["section"]
.widget.can_change_related
)
self.client.force_login(self.adduser)
# The user can't change sections yet, so they shouldn't see the
# "change section" link.
url = reverse("admin:admin_views_article_add")
change_link_text = "change_id_section"
response = self.client.get(url)
self.assertFalse(get_change_related(response))
self.assertNotContains(response, change_link_text)
# Allow the user to change sections too. Now they can see the
# "change section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("change", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_change_related(response))
self.assertContains(response, change_link_text)
def test_conditionally_show_delete_section_link(self):
"""
The foreign key widget should only show the "delete related" button if
the user has permission to delete that related item.
"""
def get_delete_related(response):
return (
response.context["adminform"]
.form.fields["sub_section"]
.widget.can_delete_related
)
self.client.force_login(self.adduser)
# The user can't delete sections yet, so they shouldn't see the
# "delete section" link.
url = reverse("admin:admin_views_article_add")
delete_link_text = "delete_id_sub_section"
response = self.client.get(url)
self.assertFalse(get_delete_related(response))
self.assertNotContains(response, delete_link_text)
# Allow the user to delete sections too. Now they can see the
# "delete section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("delete", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_delete_related(response))
self.assertContains(response, delete_link_text)
def test_disabled_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_active = False
superuser.save()
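        # An inactive user is treated as logged out by the admin and is shown
        # the login form.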
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_staff = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_list_permissions(self):
"""
If a user has no module perms, the app list returns a 404.
"""
opts = Article._meta
change_user = User.objects.get(username="changeuser")
permission = get_perm(Article, get_permission_codename("change", opts))
self.client.force_login(self.changeuser)
# the user has no module permissions
change_user.user_permissions.remove(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 404)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk="foo")
shortcut_url = reverse("admin:view_on_site", args=(model_ctype.pk, obj.pk))
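        # The shortcut view resolves the content type and pk, then redirects
        # to the object's get_absolute_url().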
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
# Logged in? Redirect.
self.client.force_login(self.superuser)
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
        # The domain may vary depending on whether the contrib.sites tests also ran.
self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/")
def test_has_module_permission(self):
"""
        has_module_permission() returns True for all users who have any
        permission for that module (view, add, change, or delete), so that
        the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
def test_overriding_has_module_permission(self):
"""
If has_module_permission() always returns False, the module shouldn't
be displayed on the admin index page for any users.
"""
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
self.assertContains(response, sections)
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(index_url)
self.assertNotContains(response, articles)
# The app list displays Sections but not Articles as the latter has
# ModelAdmin.has_module_permission() = False.
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
self.assertContains(response, sections)
self.assertNotContains(response, articles)
def test_post_save_message_no_forbidden_links_visible(self):
"""
Post-save message shouldn't contain a link to the change form if the
user doesn't have the change permission.
"""
self.client.force_login(self.adduser)
# Emulate Article creation for user with add-only permission.
post_data = {
"title": "Fun & games",
"content": "Some content",
"date_0": "2015-10-31",
"date_1": "16:35:00",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_article_add"), post_data, follow=True
)
self.assertContains(
response,
'<li class="success">The article “Fun & games” was added successfully.'
"</li>",
html=True,
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewProxyModelPermissionsTests(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
# Setup permissions.
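        # Proxy models carry their own permissions, distinct from those of
        # the concrete User model.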
opts = UserProxy._meta
cls.viewuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("view", opts))
)
cls.adduser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("add", opts))
)
cls.changeuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("change", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("delete", opts))
)
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(
username="user_proxy", password="secret"
)
def test_add(self):
self.client.force_login(self.adduser)
url = reverse("admin:admin_views_userproxy_add")
data = {
"username": "can_add",
"password": "secret",
"date_joined_0": "2019-01-15",
"date_joined_1": "16:59:10",
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username="can_add").exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_userproxy_changelist"))
self.assertContains(response, "<h1>Select user proxy to view</h1>")
response = self.client.get(
reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
)
self.assertContains(response, "<h1>View user proxy</h1>")
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
"password": self.user_proxy.password,
"username": self.user_proxy.username,
"date_joined_0": self.user_proxy.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": self.user_proxy.date_joined.strftime("%H:%M:%S"),
"first_name": "first_name",
}
url = reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(
response, reverse("admin:admin_views_userproxy_changelist")
)
self.assertEqual(
UserProxy.objects.get(pk=self.user_proxy.pk).first_name, "first_name"
)
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_userproxy_delete", args=(self.user_proxy.pk,))
response = self.client.post(url, {"post": "yes"}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.changeuser.user_permissions.add(
get_perm(Report, get_permission_codename("change", Report._meta))
)
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse("admin:index"))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.post(reverse("admin:logout"))
@skipUnlessDBFeature("can_defer_constraint_checks")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.v2 = Villain.objects.create(name="Sue")
cls.sv1 = SuperVillain.objects.create(name="Bob")
cls.pl1 = Plot.objects.create(
name="World Domination", team_leader=cls.v1, contact=cls.v2
)
cls.pl2 = Plot.objects.create(
name="World Peace", team_leader=cls.v2, contact=cls.v2
)
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
cls.pd1 = PlotDetails.objects.create(details="almost finished", plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(
location="underground bunker", villain=cls.v1
)
cls.sh2 = SecretHideout.objects.create(
location="floating castle", villain=cls.sv1
)
cls.ssh1 = SuperSecretHideout.objects.create(
location="super floating castle!", supervillain=cls.sv1
)
cls.cy1 = CyclicOne.objects.create(name="I am recursive", two_id=1)
cls.cy2 = CyclicTwo.objects.create(name="I am recursive too", one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>'
% (
reverse("admin:admin_views_plot_change", args=(self.pl1.pk,)),
reverse("admin:admin_views_plotdetails_change", args=(self.pd1.pk,)),
)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertRegex(response.content.decode(), pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
)
response = self.client.get(
reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
)
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username="deleteuser")
delete_user.user_permissions.add(
get_perm(Plot, get_permission_codename("delete", Plot._meta))
)
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(self.pl1.pk,))
)
self.assertContains(
response,
"your account doesn't have permission to delete the following types of "
"objects",
)
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(
reverse("admin:admin_views_question_delete", args=(q.pk,))
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question="Why?")
Answer.objects.create(question=q, answer="Because.")
response = self.client.post(
reverse("admin:admin_views_question_delete", args=(q.pk,)), {"post": "yes"}
)
self.assertEqual(Question.objects.count(), 1)
self.assertContains(
response, "would require deleting the following protected related objects"
)
def test_restricted(self):
album = Album.objects.create(title="Amaryllis")
song = Song.objects.create(album=album, name="Unity")
response = self.client.get(
reverse("admin:admin_views_album_delete", args=(album.pk,))
)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse("admin:admin_views_song_change", args=(song.pk,)),
)
def test_post_delete_restricted(self):
album = Album.objects.create(title="Amaryllis")
Song.objects.create(album=album, name="Unity")
response = self.client.post(
reverse("admin:admin_views_album_delete", args=(album.pk,)),
{"post": "yes"},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
"admin:admin_views_plot_change", args=(self.pl1.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
"admin:admin_views_plot_change", args=(self.pl2.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)),
"<li>Secret hideout: floating castle",
"<li>Super secret hideout: super floating castle!",
]
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(
reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name="hott")
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
"admin:admin_views_funkytag_change", args=(tag.id,)
)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(plot.pk,))
)
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has GenericForeignKey with
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name="djangoproject")
tag = FunkyTag.objects.create(content_object=bookmark, name="django")
tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(
reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,))
)
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
response = self.client.get(
reverse("admin2:admin_views_book_delete", args=(book.pk,))
)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name="hott")
response = self.client.get(reverse("admin:admin_views_funkytag_changelist"))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.pk = (
"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "
r"""-_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
)
cls.m1 = ModelWithStringPrimaryKey.objects.create(string_pk=cls.pk)
content_type_pk = ContentType.objects.get_for_model(
ModelWithStringPrimaryKey
).pk
user_pk = cls.superuser.pk
LogEntry.objects.log_action(
user_pk,
content_type_pk,
cls.pk,
cls.pk,
            2,  # CHANGE action flag.
change_message="Changed something",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_get_history_view(self):
"""
        Retrieving the history for an object via the urlencoded form of its
        primary key should work.
Refs #12349, #18550.
"""
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_history", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
self.assertContains(response, "Changed something")
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
def test_changelist_to_changeform_link(self):
"""
        The link to an object's change form in the changelist should use
        reverse() and be quoted.
"""
response = self.client.get(
reverse("admin:admin_views_modelwithstringprimarykey_changelist")
)
        # This URL comes through reverse(), so it's URL-quoted and iri_to_uri()-encoded.
pk_final_url = escape(iri_to_uri(quote(self.pk)))
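        # Build the URL with a placeholder, then splice in the pre-encoded
        # pk; passing it to reverse() directly would quote it again.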
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", pk_final_url)
should_contain = '<th class="field-__str__"><a href="%s">%s</a></th>' % (
change_url,
escape(self.pk),
)
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"""
The link from the recent actions list referring to the changeform of
the object should be quoted.
"""
response = self.client.get(reverse("admin:index"))
link = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(quote(self.pk),)
)
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_deleteconfirmation_link(self):
""" "
The link from the delete confirmation page referring back to the
changeform of the object should be quoted.
"""
url = reverse(
"admin:admin_views_modelwithstringprimarykey_delete", args=(quote(self.pk),)
)
response = self.client.get(url)
        # This URL comes through reverse(), so it's URL-quoted and iri_to_uri()-encoded.
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", escape(iri_to_uri(quote(self.pk))))
should_contain = '<a href="%s">%s</a>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add or is `add` should be visible"
add_model = ModelWithStringPrimaryKey.objects.create(
pk="i have something to add"
)
add_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
add_model2 = ModelWithStringPrimaryKey.objects.create(pk="add")
add_url = reverse("admin:admin_views_modelwithstringprimarykey_add")
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model2.pk),),
)
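        # The change URL for the pk "add" must not collide with the add
        # view's own URL.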
self.assertNotEqual(add_url, change_url)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(delete_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(history_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk="abc_123")
model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(model.pk),),
)
)
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse(
"admin:%s_modelwithstringprimarykey_change"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse(
"admin:%s_modelwithstringprimarykey_history"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
self.assertContains(
response, '<a href="%s" class="historylink"' % escape(expected_link)
)
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
reverse("admin:admin_views_modelwithstringprimarykey_add"),
{
"string_pk": "123/history",
"_continue": "1", # Save and continue editing
},
)
self.assertEqual(response.status_code, 302) # temporary redirect
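        # admin.utils.quote() encodes "/" as "_2F", hence the escaped pk in
        # the redirect URL below.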
self.assertIn("/123_2Fhistory/", response.headers["location"]) # PK is quoted
@override_settings(ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
def test_secure_view_shows_login_if_not_logged_in(self):
secure_url = reverse("secure_view")
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), secure_url)
)
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
        The staff_member_required decorator works with an argument
        (redirect_field_name).
"""
secure_url = "/test_admin/admin/secure-view2/"
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?myfield=%s" % (reverse("admin:login"), secure_url)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.b1 = Book.objects.create(name="Lærdommer")
cls.p1 = Promo.objects.create(name="<Promo for Lærdommer>", book=cls.b1)
cls.chap1 = Chapter.objects.create(
title="Norske bostaver æøå skaper problemer",
content="<p>Svært frustrerende med UnicodeDecodeErro</p>",
book=cls.b1,
)
cls.chap2 = Chapter.objects.create(
title="Kjærlighet",
content="<p>La kjærligheten til de lidende seire.</p>",
book=cls.b1,
)
cls.chap3 = Chapter.objects.create(
title="Kjærlighet", content="<p>Noe innhold</p>", book=cls.b1
)
cls.chap4 = ChapterXtra1.objects.create(
chap=cls.chap1, xtra="<Xtra(1) Norske bostaver æøå skaper problemer>"
)
cls.chap5 = ChapterXtra1.objects.create(
chap=cls.chap2, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap6 = ChapterXtra1.objects.create(
chap=cls.chap3, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap7 = ChapterXtra2.objects.create(
chap=cls.chap1, xtra="<Xtra(2) Norske bostaver æøå skaper problemer>"
)
cls.chap8 = ChapterXtra2.objects.create(
chap=cls.chap2, xtra="<Xtra(2) Kjærlighet>"
)
cls.chap9 = ChapterXtra2.objects.create(
chap=cls.chap3, xtra="<Xtra(2) Kjærlighet>"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_unicode_edit(self):
"""
        POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": (
"<p>Svært frustrerende med UnicodeDecodeError</p>"
),
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": (
"<p>La kjærligheten til de lidende seire.</p>"
),
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(
reverse("admin:admin_views_book_change", args=(self.b1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
The delete_view handles non-ASCII characters
"""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_book_delete", args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse("admin:admin_views_book_changelist"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_inheritance(self):
Podcast.objects.create(
name="This Week in Django", release_date=datetime.date.today()
)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso="en", name="English", english_name="English")
response = self.client.get(reverse("admin:admin_views_language_changelist"))
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get(reverse("admin:admin_views_person_changelist"))
        # 2 inputs per object (the field and the hidden id field) = 6
# 4 management hidden fields = 4
# 4 action inputs (3 regular checkboxes, 1 checkbox to select all)
# main form submit button = 1
# search field and search submit button = 2
# CSRF field = 2
# field to track 'select all' across paginated views = 1
        # 6 + 4 + 4 + 1 + 2 + 2 + 1 = 20 inputs
        self.assertContains(response, "<input", count=20)
        # 1 select per object = 3 selects, plus the actions <select> = 4
self.assertContains(response, "<select", count=4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data, follow=True
)
self.assertEqual(len(response.context["messages"]), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": str(self.per3.pk),
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?gender__exact=1", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?q=john", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
"""
Non-field errors are displayed for each of the forms in the
changelist's formset.
"""
fd1 = FoodDelivery.objects.create(
reference="123", driver="bill", restaurant="thai"
)
fd2 = FoodDelivery.objects.create(
reference="456", driver="bill", restaurant="india"
)
fd3 = FoodDelivery.objects.create(
reference="789", driver="bill", restaurant="pizza"
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
1,
html=True,
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
2,
html=True,
)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
non_form_errors = response.context["cl"].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
self.assertEqual(
str(non_form_errors),
str(ErrorList(["Grace is not a Zombie"], error_class="nonform")),
)
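    # A minimal sketch (assumed formset, standing in for whatever custom
    # changelist formset this test app wires up) of where a non-form error
    # such as "Grace is not a Zombie" can originate: a formset-level clean()
    # raising ValidationError, which non_form_errors() then exposes as an
    # ErrorList rendered with the "nonform" CSS class.
    #
    #     class NoZombiesFormSet(BaseModelFormSet):
    #         def clean(self):
    #             super().clean()
    #             raise ValidationError("Grace is not a Zombie")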
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_category_changelist"), data
)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Pagination works for list_editable items.
"""
UnorderedObject.objects.create(id=1, name="Unordered object #1")
UnorderedObject.objects.create(id=2, name="Unordered object #2")
UnorderedObject.objects.create(id=3, name="Unordered object #3")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist")
)
self.assertContains(response, "Unordered object #3")
self.assertContains(response, "Unordered object #2")
self.assertNotContains(response, "Unordered object #1")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist") + "?p=2"
)
self.assertNotContains(response, "Unordered object #3")
self.assertNotContains(response, "Unordered object #2")
self.assertContains(response, "Unordered object #1")
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ["3"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
"_selected_action": ["1"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get(reverse("admin:admin_views_person_changelist"))
self.assertNotEqual(response.context["cl"].list_editable, ())
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?%s" % IS_POPUP_VAR
)
self.assertEqual(response.context["cl"].list_editable, ())
def test_pk_hidden_fields(self):
"""
        Hidden pk fields aren't displayed in the table body; their
        corresponding human-readable values are displayed instead. The hidden
        pk fields are rendered, but separately (not in the table) and only
        once.
"""
story1 = Story.objects.create(
title="The adventures of Guido", content="Once upon a time in Djangoland..."
)
story2 = Story.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
response = self.client.get(reverse("admin:admin_views_story_changelist"))
        # Only one hidden field, rendered in a place separate from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
"""Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(
title="The adventures of Guido",
content="Once upon a time in Djangoland...",
)
story2 = OtherStory.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
link1 = reverse("admin:admin_views_otherstory_change", args=(story1.pk,))
link2 = reverse("admin:admin_views_otherstory_change", args=(story2.pk,))
response = self.client.get(reverse("admin:admin_views_otherstory_changelist"))
        # Only one hidden field, rendered in a place separate from the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id),
1,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id),
1,
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="John Doe", gender=1)
Person.objects.create(name='John O"Hara', gender=1)
Person.objects.create(name="John O'Hara", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text="Bar")
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text="Foo")
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text="Few")
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text="Bas")
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(
reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR
)
self.assertContains(response, "\n1 user\n")
self.assertContains(
response,
'<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR,
html=True,
)
def test_exact_matches(self):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=ba"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=uido"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=Bob"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=20"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">6 total</a>)</span>""",
html=True,
)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True,
)
self.assertTrue(response.context["cl"].show_admin_actions)
def test_search_with_spaces(self):
url = reverse("admin:admin_views_person_changelist") + "?q=%s"
tests = [
('"John Doe"', 1),
("'John Doe'", 1),
("John Doe", 0),
('"John Doe" John', 1),
("'John Doe' John", 1),
("John Doe John", 0),
('"John Do"', 1),
("'John Do'", 1),
("'John O'Hara'", 0),
("'John O\\'Hara'", 1),
('"John O"Hara"', 0),
('"John O\\"Hara"', 1),
]
for search, hits in tests:
with self.subTest(search=search):
response = self.client.get(url % search)
self.assertContains(response, "\n%s person" % hits)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_inline(self):
"""
Inline models which inherit from a common parent are correctly handled.
"""
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
        # test the add case with a GET
response = self.client.get(reverse("admin:admin_views_persona_add"))
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
        # test the add case with a POST
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse("admin:admin_views_persona_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(
reverse("admin:admin_views_persona_change", args=(persona_id,))
)
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post(
reverse("admin:admin_views_persona_change", args=(persona_id,)), post_data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post(reverse("admin:admin_views_gadget_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
        # Hit the page once to drain the messages from the message queue.
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
# Data is still not visible on the page
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
self.assertNotContains(response, "First Gadget")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_GET_parent_add(self):
"""
        A parent add view renders successfully when its inline is not editable.
"""
response = self.client.get(reverse("admin:admin_views_parent_add"))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.pks = [EmptyModel.objects.create().id for i in range(3)]
def setUp(self):
self.client.force_login(self.superuser)
self.super_login = {
REDIRECT_FIELD_NAME: reverse("admin:index"),
"username": "super",
"password": "secret",
}
def test_changelist_view(self):
response = self.client.get(reverse("admin:admin_views_emptymodel_changelist"))
for i in self.pks:
if i > 1:
self.assertContains(response, "Primary key = %s" % i)
else:
self.assertNotContains(response, "Primary key = %s" % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name="person1", gender=1)
Person.objects.create(name="person2", gender=2)
changelist_url = reverse("admin:admin_views_person_changelist")
# 5 queries are expected: 1 for the session, 1 for the user,
# 2 for the counts and 1 for the objects on the page
with self.assertNumQueries(5):
resp = self.client.get(changelist_url)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"q": "not_in_name"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 0 selected")
self.assertEqual(resp.context["selection_note_all"], "All 0 selected")
with self.assertNumQueries(5):
extra = {"q": "person"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"gender__exact": "1"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 1 selected")
self.assertEqual(resp.context["selection_note_all"], "1 selected")
def test_change_view(self):
for i in self.pks:
url = reverse("admin:admin_views_emptymodel_change", args=(i,))
response = self.client.get(url, follow=True)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["empty model with ID “1” doesn’t exist. Perhaps it was deleted?"],
)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_coverletter_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = CoverLetter.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"Candidate, Best</a>” was added successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_shortmessage_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
sm = ShortMessage.objects.all()[0]
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = Telegram.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Urgent telegram</a>” was added successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
p = Paper.objects.all()[0]
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
url = reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"John Doe II</a>” was changed successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(cl.pk,)),
html=True,
)
# model has no __str__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
url = reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
t = Telegram.objects.create(title="First Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_telegram_change", args=(t.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_change", args=(t.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Telegram without typo</a>” was changed successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(t.pk,)),
html=True,
)
# model has no __str__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_paper_change", args=(p.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_change", args=(p.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_history_view_custom_qs(self):
"""
Custom querysets are considered for the admin history view.
"""
self.client.post(reverse("admin:login"), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get(
reverse("admin:admin_views_filteredmanager_changelist")
)
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(1,))
).status_code,
200,
)
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(2,))
).status_code,
200,
)
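# A minimal sketch (assumed admin definitions, mirroring what this test app
# is expected to register) of the #14529 pattern AdminCustomQuerysetTest
# exercises above: ModelAdmin.get_queryset() returning a deferred or only()
# queryset, which the add/change/history views must still handle correctly.
#
#     class CoverLetterAdmin(admin.ModelAdmin):
#         def get_queryset(self, request):
#             return super().get_queryset(request).defer("date_written")
#
#     class PaperAdmin(admin.ModelAdmin):
#         def get_queryset(self, request):
#             return super().get_queryset(request).only("title")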
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
file1.write(b"a" * (2**21))
filename = file1.name
file1.close()
cls.gallery = Gallery.objects.create(name="Test Gallery")
cls.picture = Picture.objects.create(
name="Test Picture",
image=filename,
gallery=cls.gallery,
)
def setUp(self):
self.client.force_login(self.superuser)
def test_form_has_multipart_enctype(self):
response = self.client.get(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,))
)
self.assertIs(response.context["has_file_field"], True)
self.assertContains(response, MULTIPART_ENCTYPE)
def test_inline_file_upload_edit_validation_error_post(self):
"""
Inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": str(self.picture.id),
"pictures-0-gallery": str(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,)),
post_data,
)
self.assertContains(response, b"Currently")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.collector = Collector.objects.create(pk=1, name="John Fowles")
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
self.client.force_login(self.superuser)
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data["widget_set-0-name"] = "Widget 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# No file or image fields, no enctype on the forms
self.assertIs(response.context["has_file_field"], False)
self.assertNotContains(response, MULTIPART_ENCTYPE)
# Now resave that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"""
A model with an explicit autofield primary key can be saved as inlines.
"""
# First add a new inline
self.post_data["grommet_set-0-name"] = "Grommet 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""
        An inline with an editable ordering field is updated correctly.
"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update(
{
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
}
)
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
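# A standalone sketch (helper name is ours) of where POST keys such as
# "widget_set-0-name" in AdminInlineTests come from: an inline formset's
# default prefix is the reverse accessor of the ForeignKey to the parent,
# and each form's fields render as "<prefix>-<index>-<field>".
def _inline_prefix_sketch():
    from django.forms.models import inlineformset_factory

    from .models import Collector, Widget  # the models used by the tests

    WidgetFormSet = inlineformset_factory(Collector, Widget, fields=["name"])
    return WidgetFormSet().prefix  # "widget_set"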
@override_settings(ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
def setUp(self):
self.client.force_login(self.superuser)
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse("admin:admin_views_fabric_add"))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(
reverse("admin:admin_views_section_history", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.post(reverse("admin:logout"))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse("admin:password_change"))
self.assertIsNone(get_max_age(response))
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse("admin:password_change_done"))
self.assertIsNone(get_max_age(response))
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse("admin:jsi18n"))
self.assertIsNone(get_max_age(response))
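# A standalone sketch (helper name is ours) of the contract behind the
# get_max_age() assertions in NeverCacheTests: never-cached responses carry
# "Cache-Control: max-age=0, ...", while responses that set no Cache-Control
# header make get_max_age() return None.
def _never_cache_sketch():
    from django.http import HttpResponse
    from django.utils.cache import add_never_cache_headers, get_max_age

    response = HttpResponse()
    assert get_max_age(response) is None  # no Cache-Control header yet
    add_never_cache_headers(response)  # what the never_cache decorator applies
    assert get_max_age(response) == 0
    return response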
@override_settings(ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_prepopulated_on(self):
response = self.client.get(reverse("admin:admin_views_prepopulatedpost_add"))
self.assertContains(response, ""id": "#id_slug"")
        self.assertContains(
            response, "&quot;dependency_ids&quot;: [&quot;#id_title&quot;]"
        )
        self.assertContains(
            response,
            "&quot;id&quot;: &quot;#id_prepopulatedsubpost_set-0-subslug&quot;",
        )
def test_prepopulated_off(self):
response = self.client.get(
reverse("admin:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, ""id": "#id_slug"")
        self.assertNotContains(
            response, "&quot;dependency_ids&quot;: [&quot;#id_title&quot;]"
        )
        self.assertNotContains(
            response,
            "&quot;id&quot;: &quot;#id_prepopulatedsubpost_set-0-subslug&quot;",
        )
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get(
reverse("admin:admin_views_prepopulatedpostlargeslug_add")
)
self.assertContains(response, ""maxLength": 1000") # instead of 1,000
def test_view_only_add_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'
which is present in the add view, even if the
ModelAdmin.has_change_permission() returns False.
"""
response = self.client.get(reverse("admin7:admin_views_prepopulatedpost_add"))
self.assertContains(response, "data-prepopulated-fields=")
self.assertContains(response, ""id": "#id_slug"")
def test_view_only_change_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'. That
doesn't break a view-only change view.
"""
response = self.client.get(
reverse("admin7:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, 'data-prepopulated-fields="[]"')
self.assertContains(response, '<div class="readonly">%s</div>' % self.p1.slug)
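# A minimal sketch (assumed admin, mirroring what this test app is expected
# to register) of the configuration behind PrePopulatedTest: each entry in
# prepopulated_fields is serialized into the data-prepopulated-fields JSON
# attribute asserted above, roughly {"id": "#id_slug", "name": "slug",
# "dependency_ids": ["#id_title"], "maxLength": ...}, which the admin's
# prepopulate JavaScript consumes.
#
#     class PrePopulatedPostAdmin(admin.ModelAdmin):
#         prepopulated_fields = {"slug": ("title",)}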
@override_settings(ROOT_URLCONF="admin_views.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
self.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def test_login_button_centered(self):
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
button = self.selenium.find_element(By.CSS_SELECTOR, ".submit-row input")
offset_left = button.get_property("offsetLeft")
offset_right = button.get_property("offsetParent").get_property(
"offsetWidth"
) - (offset_left + button.get_property("offsetWidth"))
# Use assertAlmostEqual to avoid pixel rounding errors.
self.assertAlmostEqual(offset_left, offset_right, delta=3)
def test_prepopulated_fields(self):
"""
The JavaScript-automated prepopulated fields work with the main form
and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_mainprepopulated_add")
)
self.wait_for(".select2")
# Main form ----------------------------------------------------------
self.selenium.find_element(By.ID, "id_pubdate").send_keys("2012-02-18")
self.select_option("#id_status", "option two")
self.selenium.find_element(By.ID, "id_name").send_keys(
" the mAin nÀMë and it's awεšomeıııİ"
)
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
slug3 = self.selenium.find_element(By.ID, "id_slug3").get_attribute("value")
self.assertEqual(slug1, "the-main-name-and-its-awesomeiiii-2012-02-18")
self.assertEqual(slug2, "option-two-the-main-name-and-its-awesomeiiii")
self.assertEqual(
slug3, "the-main-n\xe0m\xeb-and-its-aw\u03b5\u0161ome\u0131\u0131\u0131i"
)
# Stacked inlines with fieldsets -------------------------------------
# Initial inline
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-pubdate"
).send_keys("2011-12-17")
self.select_option("#id_relatedprepopulated_set-0-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-name"
).send_keys(" here is a sŤāÇkeð inline ! ")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "here-is-a-stacked-inline-2011-12-17")
self.assertEqual(slug2, "option-one-here-is-a-stacked-inline")
initial_select2_inputs = self.selenium.find_elements(
By.CLASS_NAME, "select2-selection"
)
# Inline formsets have empty/invisible forms.
# Only the 4 visible select2 inputs are initialized.
num_initial_select2_inputs = len(initial_select2_inputs)
self.assertEqual(num_initial_select2_inputs, 4)
# Add an inline
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
0
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 2,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-pubdate"
).send_keys("1999-01-25")
self.select_option("#id_relatedprepopulated_set-1-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-name"
).send_keys(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog "
"text... "
)
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug2"
).get_attribute("value")
# 50 characters maximum for slug1 field
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
# 60 characters maximum for slug2 field
self.assertEqual(
slug2, "option-two-now-you-have-another-stacked-inline-with-a-very-l"
)
# Tabular inlines ----------------------------------------------------
# Initial inline
element = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-status"
)
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-pubdate"
).send_keys("1234-12-07")
self.select_option("#id_relatedprepopulated_set-2-0-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-name"
).send_keys("And now, with a tÃbűlaŘ inline !!!")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "and-now-with-a-tabular-inline-1234-12-07")
self.assertEqual(slug2, "option-two-and-now-with-a-tabular-inline")
# Add an inline
# Button may be outside the browser frame.
element = self.selenium.find_elements(
By.LINK_TEXT, "Add another Related prepopulated"
)[1]
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
element.click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 4,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-pubdate"
).send_keys("1981-08-22")
self.select_option("#id_relatedprepopulated_set-2-1-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-name"
).send_keys(r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug2"
).get_attribute("value")
self.assertEqual(slug1, "tabular-inline-with-ignored-characters-1981-08-22")
self.assertEqual(slug2, "option-one-tabular-inline-with-ignored-characters")
# Add an inline without an initial inline.
# The button is outside of the browser frame.
self.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
2
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 6,
)
# Stacked Inlines without fieldsets ----------------------------------
# Initial inline.
row_id = "id_relatedprepopulated_set-4-0-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("2011-12-12")
self.select_option(f"#{row_id}status", "option one")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" sŤāÇkeð inline ! "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "stacked-inline-2011-12-12")
self.assertEqual(slug2, "option-one")
# Add inline.
self.selenium.find_elements(
By.LINK_TEXT,
"Add another Related prepopulated",
)[3].click()
row_id = "id_relatedprepopulated_set-4-1-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("1999-01-20")
self.select_option(f"#{row_id}status", "option two")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" now you haVe anöther sŤāÇkeð inline with a very loooong "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
self.assertEqual(slug2, "option-two")
# Save and check that everything is properly stored in the database
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.assertEqual(MainPrepopulated.objects.count(), 1)
MainPrepopulated.objects.get(
name=" the mAin nÀMë and it's awεšomeıııİ",
pubdate="2012-02-18",
status="option two",
slug1="the-main-name-and-its-awesomeiiii-2012-02-18",
slug2="option-two-the-main-name-and-its-awesomeiiii",
slug3="the-main-nàmë-and-its-awεšomeıııi",
)
self.assertEqual(RelatedPrepopulated.objects.count(), 6)
RelatedPrepopulated.objects.get(
name=" here is a sŤāÇkeð inline ! ",
pubdate="2011-12-17",
status="option one",
slug1="here-is-a-stacked-inline-2011-12-17",
slug2="option-one-here-is-a-stacked-inline",
)
RelatedPrepopulated.objects.get(
# 75 characters in name field
name=(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooo"
),
pubdate="1999-01-25",
status="option two",
slug1="now-you-have-another-stacked-inline-with-a-very-lo",
slug2="option-two-now-you-have-another-stacked-inline-with-a-very-l",
)
RelatedPrepopulated.objects.get(
name="And now, with a tÃbűlaŘ inline !!!",
pubdate="1234-12-07",
status="option two",
slug1="and-now-with-a-tabular-inline-1234-12-07",
slug2="option-two-and-now-with-a-tabular-inline",
)
RelatedPrepopulated.objects.get(
name=r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate="1981-08-22",
status="option one",
slug1="tabular-inline-with-ignored-characters-1981-08-22",
slug2="option-one-tabular-inline-with-ignored-characters",
)
def test_populate_existing_object(self):
"""
The prepopulation works for existing objects too, as long as
the original field is empty (#19082).
"""
from selenium.webdriver.common.by import By
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=" this is the mAin nÀMë",
pubdate="2012-02-18",
status="option two",
slug1="",
slug2="",
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
object_url = self.live_server_url + reverse(
"admin:admin_views_mainprepopulated_change", args=(item.id,)
)
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" the best")
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
# Save the object
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" hello")
        # The slugs didn't change since they were originally not empty.
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
def test_collapsible_fieldset(self):
"""
        The 'collapse' class in a fieldsets definition allows showing/hiding
        the appropriate field section.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_add")
)
self.assertFalse(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
self.assertTrue(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "fieldsetcollapser0").text, "Hide"
)
def test_selectbox_height_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_pizza_add")
self.selenium.get(url)
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
from_filter_box = self.selenium.find_element(By.ID, "id_toppings_filter")
from_box = self.selenium.find_element(By.ID, "id_toppings_from")
to_filter_box = self.selenium.find_element(By.ID, "id_toppings_filter_selected")
to_box = self.selenium.find_element(By.ID, "id_toppings_to")
self.assertEqual(
(
to_filter_box.get_property("offsetHeight")
+ to_box.get_property("offsetHeight")
),
(
from_filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_selectbox_height_not_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_question_add")
self.selenium.get(url)
from_filter_box = self.selenium.find_element(
By.ID, "id_related_questions_filter"
)
from_box = self.selenium.find_element(By.ID, "id_related_questions_from")
to_filter_box = self.selenium.find_element(
By.ID, "id_related_questions_filter_selected"
)
to_box = self.selenium.find_element(By.ID, "id_related_questions_to")
self.assertEqual(
(
to_filter_box.get_property("offsetHeight")
+ to_box.get_property("offsetHeight")
),
(
from_filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
from selenium.webdriver.common.by import By
# First form field has a single widget
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_picture_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_name"),
)
# First form field has a MultiWidget
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_reservation_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_start_date_0"),
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)
def test_list_editable_popups(self):
"""
list_editable foreign keys have add/change popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
s1 = Section.objects.create(name="Test section")
Article.objects.create(
title="foo",
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=s1,
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_changelist")
)
# Change popup
self.selenium.find_element(By.ID, "change_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Change section")
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.clear()
name_input.send_keys("<i>edited section</i>")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Hide sidebar.
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "<i>edited section</i>")
# Rendered select2 input.
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\n<i>edited section</i>")
# Add popup
self.selenium.find_element(By.ID, "add_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Add section")
self.selenium.find_element(By.ID, "id_name").send_keys("new section")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "new section")
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\nnew section")
def test_inline_uuid_pk_edit_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "change_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(select.first_selected_option.text, str(parent.id))
self.assertEqual(
select.first_selected_option.get_attribute("value"), str(parent.id)
)
def test_inline_uuid_pk_add_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_views_relatedwithuuidpkmodel_add")
)
self.selenium.find_element(By.ID, "add_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
uuid_id = str(ParentWithUUIDPK.objects.first().id)
self.assertEqual(select.first_selected_option.text, uuid_id)
self.assertEqual(select.first_selected_option.get_attribute("value"), uuid_id)
def test_inline_uuid_pk_delete_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Yes, I’m sure"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(ParentWithUUIDPK.objects.count(), 0)
self.assertEqual(select.first_selected_option.text, "---------")
self.assertEqual(select.first_selected_option.get_attribute("value"), "")
def test_inline_with_popup_cancel_delete(self):
"""Clicking ""No, take me back" on a delete popup closes the window."""
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//a[text()="No, take me back"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertEqual(len(self.selenium.window_handles), 1)
def test_list_editable_raw_id_fields(self):
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
parent2 = ParentWithUUIDPK.objects.create(title="test2")
RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_changelist",
current_app=site2.name,
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "lookup_id_form-0-parent").click()
self.wait_for_and_switch_to_popup()
# Select "parent2" in the popup.
self.selenium.find_element(By.LINK_TEXT, str(parent2.pk)).click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# The newly selected pk should appear in the raw id input.
value = self.selenium.find_element(By.ID, "id_form-0-parent").get_attribute(
"value"
)
self.assertEqual(value, str(parent2.pk))
def test_input_element_font(self):
"""
Browsers' default stylesheets override the font of inputs. The admin
adds additional CSS to handle this.
"""
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
element = self.selenium.find_element(By.ID, "id_username")
        # Some browsers quote the fonts, some don't.
fonts = [
font.strip().strip('"')
for font in element.value_of_css_property("font-family").split(",")
]
self.assertEqual(
fonts,
[
"-apple-system",
"BlinkMacSystemFont",
"Segoe UI",
"system-ui",
"Roboto",
"Helvetica Neue",
"Arial",
"sans-serif",
"Apple Color Emoji",
"Segoe UI Emoji",
"Segoe UI Symbol",
"Noto Color Emoji",
],
)
def test_search_input_filtered_page(self):
from selenium.webdriver.common.by import By
Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="Grace Hopper", gender=1, alive=False)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
person_url = reverse("admin:admin_views_person_changelist") + "?q=Gui"
self.selenium.get(self.live_server_url + person_url)
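        # The search bar should keep a usable width on a filtered changelist.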
self.assertGreater(
self.selenium.find_element(By.ID, "searchbar").rect["width"],
50,
)
def test_related_popup_index(self):
"""
Create a chain of 'self' related objects via popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
base_window = self.selenium.current_window_handle
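        # Each "add" click below opens a nested popup; record the window
        # handles so each level can be revisited as the popups are saved in
        # reverse order.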
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
popup_window_test = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
popup_window_test2 = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.selenium.find_element(By.ID, "id_title").send_keys("test3")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test2)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test3").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test2").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(base_window)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
def test_related_popup_incorrect_close(self):
"""
        Clean up child popups when closing a parent popup.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
test_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
test2_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.assertEqual(len(self.selenium.window_handles), 4)
self.selenium.switch_to.window(test2_window)
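        # Saving the middle popup should also dismiss its orphaned child
        # popup, leaving only the base window and the first popup.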
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 2, 1)
self.assertEqual(len(self.selenium.window_handles), 2)
# Close final popup to clean up test.
self.selenium.switch_to.window(test_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 1, 1)
self.selenium.switch_to.window(self.selenium.window_handles[-1])
def test_hidden_fields_small_window(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
self.selenium.get(self.live_server_url + reverse("admin:admin_views_story_add"))
field_title = self.selenium.find_element(By.CLASS_NAME, "field-title")
current_size = self.selenium.get_window_size()
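        # The hidden field must stay hidden at both desktop and mobile
        # viewport sizes.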
try:
self.selenium.set_window_size(1024, 768)
self.assertIs(field_title.is_displayed(), False)
self.selenium.set_window_size(767, 575)
self.assertIs(field_title.is_displayed(), False)
finally:
            self.selenium.set_window_size(
                current_size["width"], current_size["height"]
            )
def test_updating_related_objects_updates_fk_selects(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
born_country_select_id = "id_born_country"
living_country_select_id = "id_living_country"
favorite_country_to_vacation_select_id = "id_favorite_country_to_vacation"
continent_select_id = "id_continent"
def _get_HTML_inside_element_by_id(id_):
return self.selenium.find_element(By.ID, id_).get_attribute("innerHTML")
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_traveler_add")
self.selenium.get(self.live_server_url + add_url)
# Add new Country from the born_country select.
self.selenium.find_element(By.ID, f"add_{born_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Argentina")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("South America")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
""",
)
# Argentina won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add new Country from the living_country select.
self.selenium.find_element(By.ID, f"add_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Spain")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Europe")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Spain</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Spain</option>
""",
)
# Spain won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Edit second Country created from living_country select.
favorite_select = Select(
self.selenium.find_element(By.ID, living_country_select_id)
)
favorite_select.select_by_visible_text("Spain")
self.selenium.find_element(By.ID, f"change_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.clear()
favorite_name_input.send_keys("Italy")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Italy</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Italy</option>
""",
)
# favorite_country_to_vacation field has no options.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add a new Asian country.
self.selenium.find_element(
By.ID, f"add_{favorite_country_to_vacation_select_id}"
).click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.send_keys("Qatar")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Asia")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Submit the new Traveler.
self.selenium.find_element(By.CSS_SELECTOR, '[name="_save"]').click()
traveler = Traveler.objects.get()
self.assertEqual(traveler.born_country.name, "Argentina")
self.assertEqual(traveler.living_country.name, "Italy")
self.assertEqual(traveler.favorite_country_to_vacation.name, "Qatar")
def test_redirect_on_add_view_add_another_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 2")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 2)
def test_redirect_on_add_view_continue_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and continue editing"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input_value = name_input.get_attribute("value")
self.assertEqual(name_input_value, "Test section 1")
@override_settings(ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(AdminFieldExtractionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_readonly_get(self):
response = self.client.get(reverse("admin:admin_views_post_add"))
self.assertNotContains(response, 'name="posted"')
        # 3 fields + 2 submit buttons + 5 inline management form fields + 2
# hidden fields for inlines + 1 field for the inline + 2 empty form
# + 1 logout form.
self.assertContains(response, "<input", count=17)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response, "<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Multiline text in a readonly field gets <br> tags
self.assertContains(response, "Multiline<br>test<br>string")
self.assertContains(
response,
'<div class="readonly">Multiline<br>html<br>content</div>',
html=True,
)
self.assertContains(response, "InlineMultiline<br>test<br>string")
self.assertContains(
response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)),
)
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<div class="help"', 3)
self.assertContains(
response,
'<div class="help" id="id_title_helptext">Some help text for the title '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help" id="id_content_helptext">Some help text for the content '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help">Some help text for the date (with Unicode ŠĐĆŽćžšđ)'
"</div>",
html=True,
)
p = Post.objects.create(
title="I worked on readonly_fields", content="Its good stuff"
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_text_field(self):
p = Post.objects.create(
title="Readonly test",
content="test",
readonly_content="test\r\n\r\ntest\r\n\r\ntest\r\n\r\ntest",
)
Link.objects.create(
url="http://www.djangoproject.com",
post=p,
readonly_link_content="test\r\nlink",
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
# Checking readonly field.
self.assertContains(response, "test<br><br>test<br><br>test<br><br>test")
# Checking readonly field in inline.
self.assertContains(response, "test<br>link")
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
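        # "posted" is read-only, so even a submitted value is ignored and the
        # default (today) is kept.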
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by("-id")[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse("admin:admin_views_pizza_add"))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(
reverse("admin2:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get(
reverse("admin:admin_views_choice_change", args=(choice.pk,))
)
self.assertContains(
response, '<div class="readonly">No opinion</div>', html=True
)
def _test_readonly_foreignkey_links(self, admin_site):
"""
ForeignKey readonly fields render as links if the target model is
registered in admin.
"""
chapter = Chapter.objects.create(
title="Chapter 1",
content="content",
book=Book.objects.create(name="Book 1"),
)
language = Language.objects.create(iso="_40", name="Test")
obj = ReadOnlyRelatedField.objects.create(
chapter=chapter,
language=language,
user=self.superuser,
)
response = self.client.get(
reverse(
f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,)
),
)
# Related ForeignKey object registered in admin.
user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
f"{admin_site}:admin_views_language_change",
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(
response, '<div class="readonly">Chapter 1</div>', html=True
)
def test_readonly_foreignkey_links_default_admin_site(self):
self._test_readonly_foreignkey_links("admin")
def test_readonly_foreignkey_links_custom_admin_site(self):
self._test_readonly_foreignkey_links("namespaced_admin")
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(reverse("admin:admin_views_topping_add"))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(
reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
)
self.assertContains(response, "<label>Toppings:</label>", html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name="Adam")
pl = Plot.objects.create(name="Test Plot", team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details="Brand New Plot", plot=pl)
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
pd_url = reverse("admin:admin_views_plotdetails_change", args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
self.assertEqual(field.contents(), "-") # default empty value
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(
reverse("admin:admin_views_fieldoverridepost_change", args=(p.pk,))
)
self.assertContains(
response,
'<div class="help">Overridden help text for the date</div>',
html=True,
)
self.assertContains(
response,
'<label for="id_public">Overridden public label:</label>',
html=True,
)
self.assertNotContains(
response, "Some help text for the date (with Unicode ŠĐĆŽćžšđ)"
)
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name="<a>evil</a>")
response = self.client.get(
reverse("admin:admin_views_section_change", args=(section.pk,))
)
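        # The raw tag must not leak through unescaped; only its escaped form
        # should appear.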
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name="Americano")
url = reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
with self.settings(LANGUAGE_CODE="fr"):
response = self.client.get(url)
self.assertContains(response, "<label>Toppings\u00A0:</label>", html=True)
@override_settings(ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get(reverse("admin:admin_views_stumpjoke_add"))
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)
@override_settings(ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
Inquisition.objects.create(expected=False, leader=actor, country="Spain")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
# lookup query string; in model we define inquisition field to have a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
# lookup query string; in model we define defendant0 field to have a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
# lookup query string; in model we define defendant1 field to have a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
def test_list_display_method_same_name_as_reverse_accessor(self):
"""
Should be able to use a ModelAdmin method in list_display that has the
same name as a reverse model field ("sketch" in this case).
"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
response = self.client.get(reverse("admin:admin_views_inquisition_changelist"))
self.assertContains(response, "list-display-sketch")
@override_settings(ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
},
)
new_user = User.objects.get(username="newuser")
self.assertRedirects(
response, reverse("admin:auth_user_change", args=(new_user.pk,))
)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_continue": "1",
},
)
new_user = User.objects.get(username="newuser")
new_user_url = reverse("admin:auth_user_change", args=(new_user.pk,))
self.assertRedirects(response, new_user_url, fetch_redirect_response=False)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
response = self.client.get(new_user_url)
self.assertContains(
response,
'<li class="success">The user “<a href="%s">'
"%s</a>” was added successfully. You may edit it again below.</li>"
% (new_user_url, new_user),
html=True,
)
def test_password_mismatch(self):
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "mismatch",
},
)
self.assertEqual(response.status_code, 200)
self.assertFormError(response.context["adminform"], "password1", [])
self.assertFormError(
response.context["adminform"],
"password2",
["The two password fields didn’t match."],
)
def test_user_fk_add_popup(self):
"""
User addition through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(response, reverse("admin:auth_user_add"))
self.assertContains(
response,
'class="related-widget-wrapper-link add-related" id="add_id_owner"',
)
response = self.client.get(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR
)
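        # Popup pages don't offer "Save and continue editing" or "Save and
        # add another" buttons.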
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR, data, follow=True
)
self.assertContains(response, ""obj": "newuser"")
def test_user_fk_change_popup(self):
"""
User change through a FK popup should return the appropriate JavaScript
response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_change", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_change", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""obj": "newuser"")
self.assertContains(response, ""action": "change"")
def test_user_fk_delete_popup(self):
"""
User deletion through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_delete", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_delete", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = {
"post": "yes",
IS_POPUP_VAR: "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""action": "delete"")
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_addanother": "1",
},
)
new_user = User.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_user_add"))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
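        # Backends that use savepoints issue extra SAVEPOINT/RELEASE queries,
        # hence the higher expected count.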
expected_num_queries = 10 if connection.features.uses_savepoints else 8
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_user_change", args=(u.pk,)))
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get(
reverse("admin3:auth_user_password_change", args=(u.pk,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["form_url"], "pony")
@override_settings(ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(
reverse("admin:auth_group_add"),
{
"name": "newgroup",
},
)
Group.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_group_changelist"))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
expected_num_queries = 8 if connection.features.uses_savepoints else 6
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_group_change", args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_field_prefix_css_classes(self):
"""
Fields have a CSS class name with a 'field-' prefix.
"""
response = self.client.get(reverse("admin:admin_views_post_add"))
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
CSS class names are used for each app and model on the admin index
pages (#17050).
"""
# General index page
response = self.client.get(reverse("admin:index"))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_form
        template.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_list
        template.
"""
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the
        delete_confirmation template.
"""
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
        Ensure the app and model tags are correctly read by the app_index
        template.
"""
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the
        delete_selected_confirmation template.
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_section_changelist"), action_data
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their
class attribute.
"""
Podcast.objects.create(name="Django Dose", release_date=datetime.date.today())
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertContains(response, '<th class="field-name">')
self.assertContains(response, '<td class="field-release_date nowrap">')
self.assertContains(response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
class AdminDocsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_tags(self):
response = self.client.get(reverse("django-admindocs-tags"))
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(
response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#built_in-autoescape">autoescape</a></li>',
html=True,
)
# An app tag exists in both the index and detail
self.assertContains(
response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>',
html=True,
)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(
response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#admin_list-admin_actions">admin_actions</a></li>',
html=True,
)
def test_filters(self):
response = self.client.get(reverse("django-admindocs-filters"))
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(
response, '<li><a href="#built_in-add">add</a></li>', html=True
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class ValidXHTMLTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_lang_name_present(self):
with translation.override(None):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(ROOT_URLCONF="admin_views.urls", USE_THOUSAND_SEPARATOR=True)
class DateHierarchyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def assert_non_localized_year(self, response, year):
"""
The year is not localized with USE_THOUSAND_SEPARATOR (#15234).
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % date.year)
def assert_contains_month_link(self, response, date):
self.assertContains(
response,
'?release_date__month=%d&release_date__year=%d"'
% (date.month, date.year),
)
def assert_contains_day_link(self, response, date):
self.assertContains(
response,
"?release_date__day=%d&"
'release_date__month=%d&release_date__year=%d"'
% (date.day, date.month, date.year),
)
def test_empty(self):
"""
        No date hierarchy links display with an empty changelist.
"""
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertNotContains(response, "release_date__year=")
self.assertNotContains(response, "release_date__month=")
self.assertNotContains(response, "release_date__day=")
def test_single(self):
"""
        A single day-level date hierarchy appears for a single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
        Day-level links appear for a changelist within a single month.
"""
DATES = (
datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
        Month-level links appear for a changelist within a single year.
"""
DATES = (
datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, "release_date__day=")
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
        Year-level links appear for a year-spanning changelist.
"""
DATES = (
datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
# no day/month-level links
self.assertNotContains(response, "release_date__day=")
self.assertNotContains(response, "release_date__month=")
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = "%s?release_date__year=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = "%s?release_date__year=%d&release_date__month=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
date.month,
)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
def test_related_field(self):
questions_data = (
# (posted data, number of answers),
(datetime.date(2001, 1, 30), 0),
(datetime.date(2003, 3, 15), 1),
(datetime.date(2005, 5, 3), 2),
)
for date, answer_count in questions_data:
question = Question.objects.create(posted=date)
for i in range(answer_count):
question.answer_set.create()
response = self.client.get(reverse("admin:admin_views_answer_changelist"))
for date, answer_count in questions_data:
link = '?question__posted__year=%d"' % date.year
if answer_count > 0:
self.assertContains(response, link)
else:
self.assertNotContains(response, link)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
One can easily customize the way related objects are saved.
Refs #16115.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
"child_set-TOTAL_FORMS": "3",
"child_set-INITIAL_FORMS": "0",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-1-name": "Catherine",
}
self.client.post(reverse("admin:admin_views_parent_add"), post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
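        # The Parent admin's custom save_related() gives each child the
        # parent's last name, hence the "Stone" suffix.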
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name="Josh Stone")
paul = Child.objects.create(parent=parent, name="Paul")
catherine = Child.objects.create(parent=parent, name="Catherine")
post = {
"child_set-TOTAL_FORMS": "5",
"child_set-INITIAL_FORMS": "2",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-0-id": paul.id,
"child_set-1-name": "Catherine",
"child_set-1-id": catherine.id,
}
self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.id,)), post
)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name="Josh Rock")
Child.objects.create(parent=parent, name="Paul")
Child.objects.create(parent=parent, name="Catherine")
post = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": parent.id,
"form-0-name": "Josh Stone",
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_parent_changelist"), post)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def test_logout(self):
self.client.force_login(self.superuser)
response = self.client.post(reverse("admin:logout"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "registration/logged_out.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:logout"))
self.assertFalse(response.context["has_permission"])
self.assertNotContains(
response, "user-tools"
        )  # The user-tools div shouldn't be visible.
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.post(reverse("admin:logout"))
self.assertEqual(
response.status_code, 302
        )  # We should be redirected to the login page.
# follow the redirect and test results.
response = self.client.post(reverse("admin:logout"), follow=True)
self.assertContains(
response,
'<input type="hidden" name="next" value="%s">' % reverse("admin:index"),
)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:login"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def send_message(self, level):
"""
        Helper that posts to the dummy action methods and asserts that a
        message with the given level appears in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_%s" % level,
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="%s">Test %s</li>' % (level, level), html=True
)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message("debug")
def test_message_info(self):
self.send_message("info")
def test_message_success(self):
self.send_message("success")
def test_message_warning(self):
self.send_message("warning")
def test_message_error(self):
self.send_message("error")
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_extra_tags",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="extra_tag info">Test tags</li>', html=True
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
admin_site = site
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
def setUp(self):
self.client.force_login(self.superuser)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
        Assert that two URLs are equal regardless of the ordering of their
        querystrings. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
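        # _changelist_filters is itself an encoded querystring; decode it so
        # the comparison ignores ordering at both levels.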
for parsed_qs in [parsed_qs1, parsed_qs2]:
if "_changelist_filters" in parsed_qs:
changelist_filters = parsed_qs["_changelist_filters"]
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs["_changelist_filters"] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
change_user_url = reverse(
"admin:auth_user_change", args=(self.joepublicuser.pk,)
)
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D1%26is_superuser__exact%3D1".format(change_user_url),
)
# Ignore scheme and host.
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Ignore ordering of querystring.
self.assertURLEqual(
"{}?is_staff__exact=0&is_superuser__exact=0".format(
reverse("admin:auth_user_changelist")
),
"{}?is_superuser__exact=0&is_staff__exact=0".format(
reverse("admin:auth_user_changelist")
),
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_superuser__exact%3D0%26is_staff__exact%3D0".format(change_user_url),
)
def get_changelist_filters(self):
return {
"is_superuser__exact": 0,
"is_staff__exact": 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
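        # The filters are URL-encoded twice: once as a querystring, and again
        # as the value of the single _changelist_filters parameter.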
return urlencode(
{"_changelist_filters": self.get_changelist_filters_querystring()}
)
def get_sample_user_id(self):
return self.joepublicuser.pk
def get_changelist_url(self):
return "%s?%s" % (
reverse("admin:auth_user_changelist", current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self, add_preserved_filters=True):
url = reverse("admin:auth_user_add", current_app=self.admin_site.name)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_change_url(self, user_id=None, add_preserved_filters=True):
if user_id is None:
user_id = self.get_sample_user_id()
url = reverse(
"admin:auth_user_change", args=(user_id,), current_app=self.admin_site.name
)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_history",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_delete",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">{}</a>'.format(self.joepublicuser.username),
response.content.decode(),
)
self.assertURLEqual(detail_link[1], self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>', response.content.decode()
)
self.assertURLEqual(history_link[1], self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>', response.content.decode()
)
self.assertURLEqual(delete_link[1], self.get_delete_url())
# Test redirect on "Save".
post_data = {
"username": "joepublic",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
}
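        # _save, _continue, and _addanother mirror the admin submit buttons
        # and determine the post-save redirect.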
post_data["_save"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_changelist_url())
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["_continue"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_change_url())
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["_addanother"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_change_view_without_preserved_filters(self):
response = self.client.get(self.get_change_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
post_data = {
"username": "dummy",
"password1": "test",
"password2": "test",
}
# Test redirect on "Save".
post_data["_save"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy").pk)
)
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["username"] = "dummy2"
post_data["_continue"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy2").pk)
)
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["username"] = "dummy3"
post_data["_addanother"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_add_view_without_preserved_filters(self):
response = self.client.get(self.get_add_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {"post": "yes"})
self.assertRedirects(response, self.get_changelist_url())
def test_url_prefix(self):
context = {
"preserved_filters": self.get_preserved_filters_querystring(),
"opts": User._meta,
}
prefixes = ("", "/prefix/", "/後台/")
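        # Each prefix simulates serving the admin under a different script
        # name, including a non-ASCII one.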
for prefix in prefixes:
with self.subTest(prefix=prefix), override_script_prefix(prefix):
url = reverse(
"admin:auth_user_changelist", current_app=self.admin_site.name
)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
"""#11277 -Labels of hidden fields in admin were not hidden."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_all_fields_visible(self):
response = self.client.get(reverse("admin:admin_views_emptymodelvisible_add"))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, "first")
self.assert_field_visible(response, "second")
def test_all_fields_hidden(self):
response = self.client.get(reverse("admin:admin_views_emptymodelhidden_add"))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, "first")
self.assert_field_hidden(response, "second")
def test_mixin(self):
response = self.client.get(reverse("admin:admin_views_emptymodelmixin_add"))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, "first")
self.assert_field_visible(response, "second")
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="fieldBox field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(
response, '<div class="fieldBox field-%s hidden">' % field_name
)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_add_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data.
        Also, assertFormError() and assertFormsetError() are usable for admin
        forms and formsets.
"""
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2",
}
response = self.client.post(
reverse("admin:admin_views_parentwithdependentchildren_add"), post_data
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormError(response.context["adminform"], None, [])
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
self.assertFormsetError(
response.context["inline_admin_formset"], None, None, []
)
def test_change_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
pwdc = ParentWithDependentChildren.objects.create(
some_required_info=6, family_name="Test1"
)
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1",
}
response = self.client.post(
reverse(
"admin:admin_views_parentwithdependentchildren_change", args=(pwdc.id,)
),
post_data,
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
def test_check(self):
"The view_on_site value is either a boolean or a callable"
try:
admin = CityAdmin(City, AdminSite())
CityAdmin.view_on_site = True
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = False
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = []
self.assertEqual(
admin.check(),
[
Error(
"The value of 'view_on_site' must be a callable or a boolean "
"value.",
obj=CityAdmin,
id="admin.E025",
),
],
)
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, 1))
)
def test_true(self):
"The default behavior is followed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_worker_change", args=(self.w1.pk,))
)
self.assertContains(
response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name)
)
def test_missing_get_absolute_url(self):
"None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
def test_custom_admin_site(self):
model_admin = ModelAdmin(City, customadmin.site)
content_type_pk = ContentType.objects.get_for_model(City).pk
redirect_url = model_admin.get_view_on_site_url(self.c1)
self.assertEqual(
redirect_url,
reverse(
f"{customadmin.site.name}:view_on_site",
kwargs={
"content_type_id": content_type_pk,
"object_id": self.c1.pk,
},
),
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_state_change", args=(self.s1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_true(self):
"The 'View on site' button is displayed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.r1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
self.assertContains(
response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
    The GetFormsetsArgumentCheckingAdmin ModelAdmin raises an exception
if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_explicitly_provided_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSiteFinalCatchAllPatternTests(TestCase):
"""
Verifies the behaviour of the admin catch-all view.
    * Anonymous/non-staff users are redirected to login for all URLs, whether
otherwise valid or not.
* APPEND_SLASH is applied for staff if needed.
* Otherwise Http404.
* Catch-all view disabled via AdminSite.final_catch_all_view.
"""
def test_unknown_url_redirects_login_if_not_authenticated(self):
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), unknown_url)
)
def test_unknown_url_404_if_authenticated(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_known_url_missing_slash_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")[:-1]
response = self.client.get(known_url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_non_admin_url_shares_url_prefix(self):
url = reverse("non_admin")[:-1]
response = self.client.get(url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
def test_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
    def test_unknown_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")[:-1]
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1], SCRIPT_NAME="/prefix/")
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True, FORCE_SCRIPT_NAME="/prefix/")
def test_missing_slash_append_slash_true_force_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_non_staff_user(self):
user = User.objects.create_user(
username="user",
password="secret",
email="[email protected]",
is_staff=False,
)
self.client.force_login(user)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/test_admin/admin/login/?next=/test_admin/admin/admin_views/article",
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_single_model_no_append_slash(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin9:admin_views_actor_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
    # Same tests as above, but with final_catch_all_view=False.
def test_unknown_url_404_if_not_authenticated_without_final_catch_all_view(self):
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_unknown_url_404_if_authenticated_without_final_catch_all_view(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_auth_without_final_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin10:login"), known_url)
)
def test_known_url_missing_slash_redirects_with_slash_if_not_auth_no_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, fetch_redirect_response=False
)
def test_non_admin_url_shares_url_prefix_without_final_catch_all_view(self):
url = reverse("non_admin10")
response = self.client.get(url[:-1])
self.assertRedirects(response, url, status_code=301)
def test_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin10:login"), url))
def test_unknown_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")[:-1]
response = self.client.get(url)
# Matches test_admin/admin10/admin_views/article/<path:object_id>/
self.assertRedirects(
response, url + "/", status_code=301, fetch_redirect_response=False
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url_without_final_catch_all_view(
self,
):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Outside admin.
def test_non_admin_url_404_if_not_authenticated(self):
unknown_url = "/unknown/"
response = self.client.get(unknown_url)
# Does not redirect to the admin login.
self.assertEqual(response.status_code, 404)
import datetime
from unittest import mock
from django.contrib.postgres.indexes import OpClass
from django.core.exceptions import ValidationError
from django.db import IntegrityError, NotSupportedError, connection, transaction
from django.db.models import (
CheckConstraint,
Deferrable,
F,
Func,
IntegerField,
Model,
Q,
UniqueConstraint,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Left, Lower
from django.test import ignore_warnings, modify_settings, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from . import PostgreSQLTestCase
from .models import HotelReservation, IntegerArrayModel, RangesModel, Room, Scene
try:
from psycopg2.extras import DateRange, NumericRange
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import (
DateTimeRangeField,
RangeBoundary,
RangeOperators,
)
except ImportError:
pass
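# The psycopg2-dependent imports are guarded so this module can be imported
# when PostgreSQL support isn't installed; the PostgreSQL-only test base
# class skips these tests on other databases.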
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class SchemaTests(PostgreSQLTestCase):
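    # Introspection query returning (opclass name, index name) pairs for a
    # named index; indclass is a vector of opclass OIDs, hence the ANY()
    # comparison.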
get_opclass_query = """
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
"""
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = "ints_between"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
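        # atomic() confines the expected IntegrityError so the surrounding
        # test transaction remains usable afterwards.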
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_array_contains(self):
constraint = CheckConstraint(
check=Q(field__contains=[1]),
name="array_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(IntegerArrayModel, IntegerArrayModel())
constraint.validate(IntegerArrayModel, IntegerArrayModel(field=[1]))
def test_check_constraint_daterange_contains(self):
constraint_name = "dates_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(dates__contains=F("dates_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = "timestamps_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(timestamps__contains=F("timestamps_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_check_constraint_range_contains(self):
constraint = CheckConstraint(
check=Q(ints__contains=(1, 5)),
name="ints_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(6, 10)))
def test_check_constraint_range_lower_upper(self):
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0) & Q(ints__endswith__lte=99),
name="ints_range_lower_upper",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(-1, 20)))
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(0, 100)))
constraint.validate(RangesModel, RangesModel(ints=(0, 99)))
def test_check_constraint_range_lower_with_nulls(self):
constraint = CheckConstraint(
check=Q(ints__isnull=True) | Q(ints__startswith__gte=0),
name="ints_optional_positive_range",
)
constraint.validate(RangesModel, RangesModel())
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0),
name="ints_positive_range",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel())
def test_opclass(self):
constraint = UniqueConstraint(
name="test_opclass",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name="test_opclass_multiple",
fields=["scene", "setting"],
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
("varchar_pattern_ops", constraint.name),
("text_pattern_ops", constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name="test_opclass_partial",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_opclass_include(self):
constraint = UniqueConstraint(
name="test_opclass_include",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
include=["setting"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower("scene"), name="text_pattern_ops"),
name="test_opclass_func",
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("text_pattern_ops", constraint.name)],
)
Scene.objects.create(scene="Scene 10", setting="The dark forest of Ewing")
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
Scene.objects.create(scene="Scene 5", setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_invalid_condition(self):
msg = "ExclusionConstraint.condition must be a Q instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_condition",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
condition=F("invalid"),
)
def test_invalid_index_type(self):
msg = "Exclusion constraints only support GiST or SP-GiST indexes."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="gin",
name="exclude_invalid_index_type",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
)
def test_invalid_expressions(self):
msg = "The expressions must be a list of 2-tuples."
for expressions in (["foo"], [("foo")], [("foo_1", "foo_2", "foo_3")]):
with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_expressions",
expressions=expressions,
)
def test_empty_expressions(self):
msg = "At least one expression is required to define an exclusion constraint."
for empty_expressions in (None, []):
with self.subTest(empty_expressions), self.assertRaisesMessage(
ValueError, msg
):
ExclusionConstraint(
index_type="GIST",
name="exclude_empty_expressions",
expressions=empty_expressions,
)
def test_invalid_deferrable(self):
msg = "ExclusionConstraint.deferrable must be a Deferrable instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_deferrable",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
deferrable="invalid",
)
def test_deferrable_with_condition(self):
msg = "ExclusionConstraint with conditions cannot be deferred."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_condition",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
deferrable=Deferrable.DEFERRED,
)
def test_invalid_include_type(self):
msg = "ExclusionConstraint.include must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_include",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
include="invalid",
)
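    # The opclasses kwarg is deprecated (RemovedInDjango50Warning) in favor
    # of wrapping expressions in OpClass(); these tests silence the warning.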
@ignore_warnings(category=RemovedInDjango50Warning)
def test_invalid_opclasses_type(self):
msg = "ExclusionConstraint.opclasses must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_opclasses",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses="invalid",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_opclasses_and_expressions_same_length(self):
msg = (
"ExclusionConstraint.expressions and "
"ExclusionConstraint.opclasses must have the same number of "
"elements."
)
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_expressions_opclasses_length",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses=["foo", "bar"],
)
def test_repr(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
condition=Q(cancelled=False),
index_type="SPGiST",
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='SPGiST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"condition=(AND: ('cancelled', False))>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"deferrable=Deferrable.IMMEDIATE>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
include=["cancelled", "room"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"include=('cancelled', 'room')>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(OpClass("datespan", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(OpClass(F(datespan), name=range_ops), '-|-')] "
"name='exclude_overlapping'>",
)
def test_eq(self):
constraint_1 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
constraint_2 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
constraint_3 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
)
constraint_4 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.DEFERRED,
)
constraint_5 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
)
constraint_6 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
include=["cancelled"],
)
constraint_7 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
)
with ignore_warnings(category=RemovedInDjango50Warning):
constraint_8 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
opclasses=["range_ops", "range_ops"],
)
constraint_9 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
opclasses=["range_ops", "range_ops"],
)
self.assertNotEqual(constraint_2, constraint_9)
self.assertNotEqual(constraint_7, constraint_8)
constraint_10 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="custom error",
)
constraint_11 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="other custom error",
)
self.assertEqual(constraint_1, constraint_1)
self.assertEqual(constraint_1, mock.ANY)
self.assertNotEqual(constraint_1, constraint_2)
self.assertNotEqual(constraint_1, constraint_3)
self.assertNotEqual(constraint_1, constraint_4)
self.assertNotEqual(constraint_1, constraint_10)
self.assertNotEqual(constraint_2, constraint_3)
self.assertNotEqual(constraint_2, constraint_4)
self.assertNotEqual(constraint_2, constraint_7)
self.assertNotEqual(constraint_4, constraint_5)
self.assertNotEqual(constraint_5, constraint_6)
self.assertNotEqual(constraint_1, object())
self.assertNotEqual(constraint_10, constraint_11)
self.assertEqual(constraint_10, constraint_10)
def test_deconstruct(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_index_type(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
index_type="SPGIST",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"index_type": "SPGIST",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_condition(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
"condition": Q(cancelled=False),
},
)
def test_deconstruct_deferrable(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
deferrable=Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"deferrable": Deferrable.DEFERRED,
},
)
def test_deconstruct_include(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
include=["cancelled", "room"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"include": ("cancelled", "room"),
},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_deconstruct_opclasses(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
opclasses=["range_ops"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"opclasses": ["range_ops"],
},
)
def _test_range_overlaps(self, constraint):
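        # Shared scenario: reservations for the same room must not overlap
        # unless cancelled; each constraint variant under test must enforce
        # this.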
# Create exclusion constraint.
self.assertNotIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
# Add initial reservations.
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 26),
timezone.datetime(2018, 6, 28),
timezone.datetime(2018, 6, 29),
]
reservation = HotelReservation.objects.create(
datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
)
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.create(
datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
start=datetimes[1],
end=datetimes[3],
room=room102,
)
HotelReservation.objects.create(
datespan=DateRange(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
cancelled=True,
)
# Overlap dates.
with self.assertRaises(IntegrityError), transaction.atomic():
reservation = HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(HotelReservation, reservation)
reservation.save()
# Valid range.
other_valid_reservations = [
# Other room.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room101,
),
# Cancelled reservation.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[1].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
cancelled=True,
),
# Other adjacent dates.
HotelReservation(
datespan=(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
),
]
for reservation in other_valid_reservations:
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.bulk_create(other_valid_reservations)
# Excluded fields.
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"room"},
)
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"datespan", "start", "end", "room"},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_overlaps_custom_opclasses(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom",
expressions=[
(TsTzRange("start", "end", RangeBoundary()), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
opclasses=["range_ops", "gist_int4_ops"],
)
self._test_range_overlaps(constraint)
def test_range_overlaps_custom(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom_opclass",
expressions=[
(
OpClass(TsTzRange("start", "end", RangeBoundary()), "range_ops"),
RangeOperators.OVERLAPS,
),
(OpClass("room", "gist_int4_ops"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_overlaps(self):
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
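    # For reference, a minimal sketch (not one of the actual test models) of
    # how an equivalent constraint could be declared on a model instead of
    # being added through the schema editor:
    #
    #     class Reservation(Model):
    #         ...
    #
    #         class Meta:
    #             constraints = [
    #                 ExclusionConstraint(
    #                     name="exclude_overlapping_reservations",
    #                     expressions=[
    #                         ("datespan", RangeOperators.OVERLAPS),
    #                         ("room", RangeOperators.EQUAL),
    #                     ],
    #                     condition=Q(cancelled=False),
    #                 ),
    #             ]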
def test_range_adjacent(self):
constraint_name = "ints_adjacent"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_validate_range_adjacent(self):
constraint = ExclusionConstraint(
name="ints_adjacent",
expressions=[("ints", RangeOperators.ADJACENT_TO)],
violation_error_message="Custom error message.",
)
range_obj = RangesModel.objects.create(ints=(20, 50))
constraint.validate(RangesModel, range_obj)
msg = "Custom error message."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(10, 20)))
constraint.validate(RangesModel, RangesModel(ints=(10, 19)))
constraint.validate(RangesModel, RangesModel(ints=(51, 60)))
constraint.validate(RangesModel, RangesModel(ints=(10, 20)), exclude={"ints"})
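    # Note: validate() checks a candidate instance in Python by querying for
    # conflicting rows, so it works even though this test never installs the
    # constraint in the database; exclude={"ints"} skips the check entirely
    # because every constrained field is excluded.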
def test_expressions_with_params(self):
constraint_name = "scene_left_equal"
self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Left("scene", 4), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table))
def test_expressions_with_key_transform(self):
constraint_name = "exclude_overlapping_reservations_smoking"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(KeyTextTransform("smoking", "requirements"), RangeOperators.EQUAL),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint_name,
self.get_constraints(HotelReservation._meta.db_table),
)
def test_index_transform(self):
constraint_name = "first_index_equal"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("field__0", RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(IntegerArrayModel, constraint)
self.assertIn(
constraint_name,
self.get_constraints(IntegerArrayModel._meta.db_table),
)
def test_range_adjacent_initially_deferred(self):
constraint_name = "ints_adjacent_deferred"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
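    # Note: a DEFERRED exclusion constraint is only enforced at COMMIT, which
    # is why the adjacent row can be created inside the transaction;
    # SET CONSTRAINTS ... IMMEDIATE forces the pending check to run at once,
    # raising the IntegrityError asserted above.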
def test_range_adjacent_gist_include(self):
constraint_name = "ints_adjacent_gist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include(self):
constraint_name = "ints_adjacent_spgist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
def test_range_adjacent_gist_include_condition(self):
constraint_name = "ints_adjacent_gist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_condition(self):
constraint_name = "ints_adjacent_spgist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_include_deferrable(self):
constraint_name = "ints_adjacent_gist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_deferrable(self):
constraint_name = "ints_adjacent_spgist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_spgist_include_not_supported(self):
constraint_name = "ints_adjacent_spgist_include_not_supported"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["id"],
)
msg = (
"Covering exclusion constraints using an SP-GiST index require "
"PostgreSQL 14+."
)
with connection.schema_editor() as editor:
with mock.patch(
"django.db.backends.postgresql.features.DatabaseFeatures."
"supports_covering_spgist_indexes",
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_range_adjacent_opclass(self):
constraint_name = "ints_adjacent_opclass"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint_name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint_name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_range_adjacent_opclass_condition(self):
constraint_name = "ints_adjacent_opclass_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclass_deferrable(self):
constraint_name = "ints_adjacent_opclass_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_opclass_include(self):
constraint_name = "ints_adjacent_gist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="gist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclass_include(self):
constraint_name = "ints_adjacent_spgist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="spgist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_equal_cast(self):
constraint_name = "exclusion_equal_room_cast"
self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Cast("number", IntegerField()), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Room, constraint)
self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table))
@isolate_apps("postgres_tests")
def test_table_create(self):
constraint_name = "exclusion_equal_number_tc"
class ModelWithExclusionConstraint(Model):
number = IntegerField()
class Meta:
app_label = "postgres_tests"
constraints = [
ExclusionConstraint(
name=constraint_name,
expressions=[("number", RangeOperators.EQUAL)],
)
]
with connection.schema_editor() as editor:
editor.create_model(ModelWithExclusionConstraint)
self.assertIn(
constraint_name,
self.get_constraints(ModelWithExclusionConstraint._meta.db_table),
)
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintOpclassesDeprecationTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_warning(self):
msg = (
"The opclasses argument is deprecated in favor of using "
"django.contrib.postgres.indexes.OpClass in "
"ExclusionConstraint.expressions."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
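    def test_opclass_expression_replacement(self):
        # A sketch of the replacement the deprecation message recommends:
        # wrapping each expression in OpClass instead of passing
        # ``opclasses`` raises no warning.
        constraint = ExclusionConstraint(
            name="exclude_overlapping",
            expressions=[
                (
                    OpClass(F("datespan"), name="range_ops"),
                    RangeOperators.ADJACENT_TO,
                ),
            ],
        )
        self.assertEqual(constraint.name, "exclude_overlapping")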
@ignore_warnings(category=RemovedInDjango50Warning)
def test_repr(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"opclasses=['range_ops']>",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses(self):
constraint_name = "ints_adjacent_opclasses"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint.name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_condition(self):
constraint_name = "ints_adjacent_opclasses_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_deferrable(self):
constraint_name = "ints_adjacent_opclasses_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_gist_opclasses_include(self):
constraint_name = "ints_adjacent_gist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclasses_include(self):
constraint_name = "ints_adjacent_spgist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
import datetime
import pickle
import sys
import unittest
from operator import attrgetter
from threading import Lock
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, Exists, F, Max, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import AND, OR, NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
FK1,
Annotation,
Article,
Author,
BaseA,
BaseUser,
Book,
CategoryItem,
CategoryRelationship,
Celebrity,
Channel,
Chapter,
Child,
ChildObjectA,
Classroom,
CommonMixedCaseForeignKeys,
Company,
Cover,
CustomPk,
CustomPkTag,
DateTimePK,
Detail,
DumbCategory,
Eaten,
Employment,
ExtraInfo,
Fan,
Food,
Identifier,
Individual,
Item,
Job,
JobResponsibilities,
Join,
LeafA,
LeafB,
LoopX,
LoopZ,
ManagedModel,
Member,
MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem,
ModelA,
ModelB,
ModelC,
ModelD,
MyObject,
NamedCategory,
Node,
Note,
NullableName,
Number,
ObjectA,
ObjectB,
ObjectC,
OneToOneCategory,
Order,
OrderItem,
Page,
Paragraph,
Person,
Plaything,
PointerA,
Program,
ProxyCategory,
ProxyObjectA,
ProxyObjectB,
Ranking,
Related,
RelatedIndividual,
RelatedObject,
Report,
ReportComment,
ReservedName,
Responsibility,
School,
SharedConnection,
SimpleCategory,
SingleObject,
SpecialCategory,
Staff,
StaffUser,
Student,
Tag,
Task,
Teacher,
Ticket21203Child,
Ticket21203Parent,
Ticket23605A,
Ticket23605B,
Ticket23605C,
TvChef,
Valid,
X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
ann2.notes.add(cls.n2, cls.n3)
        # Create these out of order so that sorting by 'id' will be different
        # from sorting by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(
info="e2", note=cls.n2, value=41, filterable=False
)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(
name="one",
created=cls.time1,
modified=cls.time1,
creator=cls.a1,
note=cls.n3,
)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(
name="two", created=cls.time2, creator=cls.a2, note=cls.n2
)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(
name="three", created=time3, creator=cls.a2, note=cls.n3
)
cls.i4 = Item.objects.create(
name="four", created=time4, creator=cls.a4, note=cls.n3
)
cls.i4.tags.set([cls.t4])
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
cls.c1 = Cover.objects.create(title="first", item=cls.i4)
cls.c2 = Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"})
self.assertIn("v0", str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"})
        # It is possible to reuse U for the second subquery, so there is no
        # need to use W.
        self.assertNotIn("w0", str(qs4.query).lower())
        # Two subqueries reuse the U0 alias, so 'U0."id"' is referenced twice
        # in the SELECT clauses and twice in the WHERE clauses.
self.assertEqual(str(qs4.query).lower().count("u0."), 4)
def test_ticket1050(self):
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=True),
[self.i3],
)
self.assertSequenceEqual(
Item.objects.filter(tags__id__isnull=True),
[self.i3],
)
def test_ticket1801(self):
self.assertSequenceEqual(
Author.objects.filter(item=self.i2),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i3),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
[self.a2],
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(
Q(creator__name="fred") | Q(tags=self.t2)
),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
)
self.assertSequenceEqual(
Item.objects.filter(
Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
),
[],
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
        self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertSequenceEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
[self.i2],
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
[self.i1, self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name")[:3],
[self.i1, self.i2],
)
def test_tickets_2080_3592(self):
self.assertSequenceEqual(
Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="one") | Q(name="a3")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(name="a3") | Q(item__name="one")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
[self.a2],
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [])
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values("creator").distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name="two")
.values("creator", "name")
.distinct()
.count(),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name", "foo")
.distinct()
.count()
),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name")
.distinct()
.count()
),
4,
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values("creator", "name").count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
self.assertSequenceEqual(q2, [self.i1])
self.assertSequenceEqual(
(q1 | q2).order_by("name"),
[self.i4, self.i1, self.i3, self.i2],
)
self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertSequenceEqual(
((q1 & q2) | q3).order_by("name"),
[self.i4, self.i1],
)
def test_order_by_tables(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by("name").query
self.assertEqual(
len(
[
t
for t in combined_query.alias_map
if combined_query.alias_refcount[t]
]
),
1,
)
def test_order_by_join_unref(self):
"""
        Related to the test above: ensure that no stale JOINs are left in
        the query when the ordering changes.
"""
qs = Celebrity.objects.order_by("greatest_fan__fan_of")
self.assertIn("OUTER JOIN", str(qs.query))
qs = qs.order_by("id")
self.assertNotIn("OUTER JOIN", str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by("name").get(pk=self.a1.pk)
self.assertNotIn("order by", captured_queries[0]["sql"].lower())
def test_tickets_4088_4306(self):
self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
self.assertSequenceEqual(
Report.objects.filter(creator__id=self.a1.id), [self.r1]
)
self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1])
def test_ticket4510(self):
self.assertSequenceEqual(
Author.objects.filter(report__name="r1"),
[self.a1],
)
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
def test_tickets_5324_6704(self):
self.assertSequenceEqual(
Item.objects.filter(tags__name="t4"),
[self.i4],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct(),
[self.i1, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(),
[self.i2, self.i3, self.i1],
)
self.assertSequenceEqual(
Author.objects.exclude(item__name="one").distinct().order_by("name"),
[self.a2, self.a3, self.a4],
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").order_by("name"),
[self.i4, self.i3],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"),
[self.i3],
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
)
self.assertEqual(
len(
[
x
for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]
),
1,
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertSequenceEqual(
Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1]
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__isnull=True).order_by("name"),
[self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
def test_ticket2091(self):
t = Tag.objects.get(name="t4")
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = sys.getrecursionlimit() // 16
msg = "Maximum recursion depth exceeded: too many subqueries."
with self.assertRaisesMessage(RecursionError, msg):
for i in range(local_recursion_limit + 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases,
{
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"AA",
"AB",
"AC",
"AD",
"AE",
"AF",
"AG",
"AH",
"AI",
"AJ",
"AK",
"AL",
"AM",
"AN",
},
)
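    @staticmethod
    def _subq_alias_sequence_sketch(count):
        # Illustrative helper (not used by the tests): one way to reproduce
        # the alias sequence asserted above -- single letters from "T"
        # through "Z", then two-letter aliases "AA", "AB", ... With count=21
        # this yields exactly the set checked in the test.
        import itertools
        import string

        singles = string.ascii_uppercase[string.ascii_uppercase.index("T") :]
        doubles = (
            first + second
            for first in string.ascii_uppercase
            for second in string.ascii_uppercase
        )
        return list(itertools.islice(itertools.chain(singles, doubles), count))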
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
msg = "Cannot combine queries on two different base models."
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4)
self.assertEqual(
Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4
)
def test_ticket2400(self):
self.assertSequenceEqual(
Author.objects.filter(item__isnull=True),
[self.a3],
)
self.assertSequenceEqual(
Tag.objects.filter(item__isnull=True),
[self.t5],
)
def test_ticket2496(self):
self.assertSequenceEqual(
Item.objects.extra(tables=["queries_author"])
.select_related()
.order_by("name")[:1],
[self.i4],
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"):
Note.objects.filter({"note": "n1", "misc": "foo"})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertSequenceEqual(
Item.objects.order_by("note__note", "name"),
[self.i2, self.i4, self.i1, self.i3],
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertSequenceEqual(
Author.objects.order_by("extra", "-name"),
[self.a2, self.a1, self.a4, self.a3],
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertSequenceEqual(
Item.objects.order_by("creator", "name"),
[self.i1, self.i3, self.i2, self.i4],
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=False).order_by("tags", "id"),
[self.i1, self.i2, self.i1, self.i2, self.i4],
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by("name")
self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by("note__note", "name")
self.assertQuerysetEqual(qs, [self.i2, self.i4, self.i1, self.i3])
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), "<Note: n2>")
self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>")
def test_ticket3037(self):
self.assertSequenceEqual(
Item.objects.filter(
Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four")
),
[self.i4],
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values("misc").distinct().order_by("note", "-misc"),
[{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}],
)
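    # The duplicate {"misc": "foo"} rows above arise because the ORDER BY
    # column must be added to the SELECT list, so DISTINCT applies to
    # (misc, note) pairs -- roughly (a sketch of the generated SQL):
    #   SELECT DISTINCT misc, note FROM queries_note ORDER BY note, misc DESC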
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn("note_id", ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(
ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}]
)
# ...or use the field name.
self.assertSequenceEqual(
ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}]
)
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertSequenceEqual(
Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
),
[self.a1],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
).filter(id=self.a1.id),
[self.a1],
)
def test_ticket6981(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_ticket9926(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent", "category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.select_related("parent", "parent__category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes("created", "month").count(), 1)
self.assertEqual(Item.objects.datetimes("created", "day").count(), 2)
self.assertEqual(len(Item.objects.datetimes("created", "day")), 2)
self.assertEqual(
Item.objects.datetimes("created", "day")[0],
datetime.datetime(2007, 12, 19, 0, 0),
)
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(select={"a": 1}),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
self.assertSequenceEqual(
Item.objects.extra(select={"a": 1}).datetimes("created", "day"),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
name = "one"
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(
where=["name=%s"], params=[name]
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
self.assertSequenceEqual(
Item.objects.extra(where=["name=%s"], params=[name]).datetimes(
"created", "day"
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_ticket7155(self):
# Nullable dates
self.assertSequenceEqual(
Item.objects.datetimes("modified", "day"),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_order_by_rawsql(self):
self.assertSequenceEqual(
Item.objects.values("note__note").order_by(
RawSQL("queries_note.note", ()),
"id",
),
[
{"note__note": "n2"},
{"note__note": "n3"},
{"note__note": "n3"},
{"note__note": "n3"},
],
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertSequenceEqual(
Tag.objects.filter(parent=self.t1, name="t3").order_by("name"),
[self.t3],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"),
[self.t1, self.t2, self.t4, self.t5],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1", name="one")
.order_by("name")
.distinct(),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(name__in=["three", "four"])
.exclude(tags__name="t1")
.order_by("name"),
[self.i4, self.i3],
)
# More twisted cases, involving nested negations.
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one")),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"),
[self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"),
[self.i4, self.i1, self.i3],
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer("name", "creator")
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertSequenceEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5)
| Q(tag__children=self.t5)
| Q(tag__children__children=self.t5)
),
[self.ann1],
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertSequenceEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
[self.i1, self.i2],
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal="m")
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal="m"), [])
self.assertQuerysetEqual(q.exclude(meal="m"), [])
self.assertQuerysetEqual(q.complex_filter({"pk": 1}), [])
self.assertQuerysetEqual(q.select_related("food"), [])
self.assertQuerysetEqual(q.annotate(Count("food")), [])
self.assertQuerysetEqual(q.order_by("meal", "food"), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(q.extra(select={"foo": "1"}), [])
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
q.extra(select={"foo": "1"})
self.assertQuerysetEqual(q.defer("meal"), [])
self.assertQuerysetEqual(q.only("meal"), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes("created", "month")
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertSequenceEqual(
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name")
),
[self.t2, self.t3],
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name", "id")
)
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values_list("name", "id")
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{"id": 1}, {"id": 2}, {"id": 3}],
)
self.assertSequenceEqual(
Annotation.objects.filter(
notes__in=Note.objects.filter(note="n1")
.values_list("note")
.values("id")
),
[self.ann1],
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue
# properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
# Queries used in an __in clause don't execute subqueries
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertSequenceEqual(qs, [self.a1, self.a2])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertSequenceEqual(qs, [self.a3, self.a4])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertSequenceEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name="a1")),
[self.a1],
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertSequenceEqual(
Item.objects.exclude(modified=self.time1).order_by("name"),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__name=self.t1.name),
[self.t1, self.t4, self.t5],
)
def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields. This
        # test is a little tricky, since NULL ordering is database-dependent;
        # instead of checking the order, we just count the number of results.
self.assertEqual(len(Tag.objects.order_by("parent__name")), 5)
# Empty querysets can be merged with others.
self.assertSequenceEqual(
Note.objects.none() | Note.objects.all(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.all() | Note.objects.none(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertSequenceEqual(
Author.objects.filter(
Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz")
),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2)
),
[self.a2],
)
self.assertSequenceEqual(
Annotation.objects.filter(
Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1")
),
[self.ann1],
)
xx = ExtraInfo.objects.create(info="xx", note=self.n3)
self.assertSequenceEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
[self.n1, self.n3],
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len(
[
x
for x in q.alias_map.values()
if x.join_type == LOUTER and q.alias_refcount[x.table_alias]
]
),
1,
)
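    @staticmethod
    def _active_louter_join_count(query):
        # Illustrative helper (not used by the tests): mirrors the inline
        # comprehensions in test_tickets_5324_6704 and test_ticket8439,
        # counting LEFT OUTER joins that are still referenced by the query.
        return len(
            [
                alias_info
                for alias_info in query.alias_map.values()
                if alias_info.join_type == LOUTER
                and query.alias_refcount[alias_info.table_alias]
            ]
        )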
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertCountEqual(
Tag.objects.all(),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name="t4"),
Item.objects.filter(~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))),
)
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~Q(~Q(tags__name="t4"))),
)
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~Q(tags__name__in=["t4", "t3"])),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])),
)
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
# It also should not create INNER JOIN.
q = Tag.objects.filter(parent__isnull=True)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertSequenceEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertSequenceEqual(q, [self.nc1])
def test_ticket_10790_4(self):
# Querying across m2m field should not strip the m2m table from join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertSequenceEqual(q, [self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2)
self.assertNotIn("INNER JOIN", str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner joins
# and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertSequenceEqual(
q,
[self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertSequenceEqual(q, [self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q1 & q2
self.assertSequenceEqual(q3, [])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
def test_ticket19672(self):
self.assertSequenceEqual(
Report.objects.filter(
Q(creator__isnull=False) & ~Q(creator__extra__value=41)
),
[self.r1],
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count("item"))
qs = qs.filter(~Q(extra__value=0)).order_by("name")
self.assertIn("SELECT", str(qs.query))
self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name="generic")
def test_common_mixed_case_foreign_keys(self):
"""
Valid query should be generated when fields fetched from joined tables
include FKs whose names only differ by case.
"""
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
c3 = SimpleCategory.objects.create(name="c3")
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
CaTeGoRy=c2
)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
category=c3
)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
"category",
"mixed_case_field_category",
"mixed_case_db_column_category",
"category__category",
"mixed_case_field_category__CaTeGoRy",
"mixed_case_db_column_category__category",
)
self.assertTrue(qs.first())
def test_excluded_intermediary_m2m_table_joined(self):
self.assertSequenceEqual(
Note.objects.filter(~Q(tag__annotation__name=F("note"))),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.filter(tag__annotation__name="a1").filter(
~Q(tag__annotation__name=F("note"))
),
[],
)
def test_field_with_filterable(self):
self.assertSequenceEqual(
Author.objects.filter(extra=self.e2),
[self.a3, self.a4],
)
def test_negate_field(self):
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.num4 = Number.objects.create(num=4)
cls.num8 = Number.objects.create(num=8)
cls.num12 = Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertSequenceEqual(Number.objects.filter(num__lt=4), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertSequenceEqual(
Number.objects.filter(num__gt=8, num__lt=13),
[self.num12],
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
[self.num8],
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertSequenceEqual(
Number.objects.filter(num__gt=11.9),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gt=12), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), [])
self.assertCountEqual(
Number.objects.filter(num__lt=12),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.0),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=11.9),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12.0),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), [])
self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), [])
self.assertCountEqual(
Number.objects.filter(num__lte=11.9),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.0),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.9),
[self.num4, self.num8, self.num12],
)
def test_ticket7759(self):
# Count should work with a partially read result set.
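        # Even after iteration over qs has started, qs.count() must reflect
        # the full number of rows rather than however many results happen to
        # be cached at that point.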
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_datetimes_invalid_field(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
msg = "'name' isn't a DateField, TimeField, or DateTimeField."
with self.assertRaisesMessage(TypeError, msg):
Item.objects.datetimes("name", "month")
def test_ticket22023(self):
with self.assertRaisesMessage(
TypeError, "Cannot call only() after .values() or .values_list()"
):
Valid.objects.values().only()
with self.assertRaisesMessage(
TypeError, "Cannot call defer() after .values() or .values_list()"
):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
n1 = Note.objects.create(note="n1", misc="foo")
n2 = Note.objects.create(note="n2", misc="bar")
e1 = ExtraInfo.objects.create(info="e1", note=n1)
e2 = ExtraInfo.objects.create(info="e2", note=n2)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
cls.i1 = Item.objects.create(
name="i1", created=datetime.datetime.now(), note=n1, creator=cls.a1
)
cls.i2 = Item.objects.create(
name="i2", created=datetime.datetime.now(), note=n1, creator=cls.a3
)
def test_ticket24525(self):
tag = Tag.objects.create()
anth100 = tag.note_set.create(note="ANTH", misc="100")
math101 = tag.note_set.create(note="MATH", misc="101")
s1 = tag.annotation_set.create(name="1")
s2 = tag.annotation_set.create(name="2")
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(
notes__in=[anth100]
)
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = (
"Unsaved model instance <NamedCategory: Other> cannot be used in an ORM "
"query."
)
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
# Note: when combining the query we need to have information available
# about the join type of the trimmed "creator__isnull" join. If we
# don't have that information, then the join is created as INNER JOIN
# and results will be incorrect.
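        # In SQL terms, both spellings below should compile to LEFT OUTER
        # JOINs from report to author (and on to extra), so r3, whose creator
        # is NULL, is kept in the results.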
q1 = Report.objects.filter(
Q(creator__isnull=True) | Q(creator__extra__info="e1")
)
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(
Q(creator__extra__info="e1")
)
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(
Q(creator__extra__info="e1") | Q(creator__isnull=True)
)
q2 = Report.objects.filter(
Q(creator__extra__info="e1")
) | Report.objects.filter(Q(creator__isnull=True))
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator=self.a1) | Q(creator__report__name="r1")
).order_by()
q2 = (
Item.objects.filter(Q(creator=self.a1)).order_by()
| Item.objects.filter(Q(creator__report__name="r1")).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator__report__name="e1") | Q(creator=self.a1)
).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name="e1")).order_by()
| Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
        # Joins having identical connections are correctly recreated in the
        # rhs query when the two querysets are ORed together (#18748).
Report.objects.create(name="r4", creator=self.a1)
q1 = Author.objects.filter(report__name="r5")
q2 = Author.objects.filter(report__name="r4").filter(report__name="r1")
combined = q1 | q2
self.assertEqual(str(combined.query).count("JOIN"), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, "a1")
def test_combine_or_filter_reuse(self):
combined = Author.objects.filter(name="a1") | Author.objects.filter(name="a3")
self.assertEqual(combined.get(name="a1"), self.a1)
def test_join_reuse_order(self):
# Join aliases are reused in order. This shouldn't raise AssertionError
# because change_map contains a circular reference (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data="mm1", tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data="mm"), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ""
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by(
"name"
),
["e1", "e2", expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertSequenceEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
[self.r1, self.r2, self.r3],
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, "d2")
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by("name")
self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by the reverse side of a foreign key,
        # although that can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(
SimpleCategory.objects.order_by("categoryitem", "pk"), [c1, c2, c1]
)
def test_filter_reverse_non_integer_pk(self):
date_obj = DateTimePK.objects.create()
extra_obj = ExtraInfo.objects.create(info="extra", date=date_obj)
self.assertEqual(
DateTimePK.objects.filter(extrainfo=extra_obj).get(),
date_obj,
)
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), []
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(
category__onetoonecategory__isnull=False
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(
category__onetoonecategory__isnull=True
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
        # Ordering by 'rank' gives us rank1, rank2, rank3. Ordering by the
        # Meta.ordering (author__name) gives rank3, rank2, rank1.
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
e2 = ExtraInfo.objects.create(info="e2", note=cls.n2)
a1 = Author.objects.create(name="a1", num=1001, extra=e1)
a2 = Author.objects.create(name="a2", num=2002, extra=e1)
a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.rank2 = Ranking.objects.create(rank=2, author=a2)
cls.rank1 = Ranking.objects.create(rank=1, author=a3)
cls.rank3 = Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
self.assertSequenceEqual(
Ranking.objects.order_by("rank"),
[self.rank1, self.rank2, self.rank3],
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertSequenceEqual(
Ranking.objects.extra(
tables=["django_site"], order_by=["-django_site.id", "rank"]
),
[self.rank1, self.rank2, self.rank3],
)
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
self.assertEqual(
[o.good for o in qs.extra(order_by=("-good",))], [True, False, False]
)
self.assertSequenceEqual(
qs.extra(order_by=("-good", "id")),
[self.rank3, self.rank2, self.rank1],
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values("id", "rank").order_by("id")
self.assertEqual([d["rank"] for d in dicts], [2, 1, 3])
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
        # extra().
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
dicts = qs.values().order_by("id")
for d in dicts:
del d["id"]
del d["author_id"]
self.assertEqual(
[sorted(d.items()) for d in dicts],
[
[("good", 0), ("rank", 2)],
[("good", 0), ("rank", 1)],
[("good", 1), ("rank", 3)],
],
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=["django_site"])
qs.query.get_compiler(qs.db).as_sql()
        # The test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name="a1")
ranking_start = Ranking.objects.get(author__name="a1")
# Make sure that the IDs from different tables don't happen to match.
self.assertSequenceEqual(
Ranking.objects.filter(author__name="a1"),
[self.rank3],
)
self.assertEqual(Ranking.objects.filter(author__name="a1").update(rank=4636), 1)
r = Ranking.objects.get(author__name="a1")
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertSequenceEqual(
Note.objects.exclude(Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q() | ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() & ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() ^ ~Q()),
[self.n1, self.n2],
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape select clauses
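        # The select strings go through the SQL params machinery, where a bare
        # %s is a placeholder, so a doubled %%s comes out as a literal %s in
        # the selected value.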
self.assertEqual(Note.objects.extra(select={"foo": "'%%s'"})[0].foo, "%s")
self.assertEqual(
Note.objects.extra(select={"foo": "'%%s bar %%s'"})[0].foo, "%s bar %s"
)
self.assertEqual(
Note.objects.extra(select={"foo": "'bar %%s'"})[0].foo, "bar %s"
)
def test_extra_select_alias_sql_injection(self):
crafted_alias = """injected_name" from "queries_note"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Note.objects.extra(select={crafted_alias: "1"})
def test_queryset_reuse(self):
# Using querysets doesn't mutate aliases.
authors = Author.objects.filter(Q(name="a1") | Q(name="nonexistent"))
self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3)
self.assertEqual(authors.count(), 1)
def test_filter_unsaved_object(self):
# These tests will catch ValueError in Django 5.0 when passing unsaved
# model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
msg = "Passing unsaved model instances to related filters is deprecated."
company = Company.objects.create(name="Django")
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.filter(staff=Staff(name="unsaved"))
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by("custom"), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
p1 = Plaything.objects.create(name="p1")
self.assertSequenceEqual(Plaything.objects.all(), [p1])
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name="s")
r = RelatedObject.objects.create(single=s, f=1)
p2 = Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
self.assertNotIn("JOIN", str(qs.query))
qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
self.assertIn("INNER", str(qs.query))
qs = qs.order_by("others__single__name")
        # The ordering by others__single__name will add one new join (to single)
# and that join must be LEFT join. The already existing join to related
# objects must be kept INNER. So, we have both an INNER and a LEFT join
# in the query.
self.assertEqual(str(qs.query).count("LEFT"), 1)
self.assertEqual(str(qs.query).count("INNER"), 1)
self.assertSequenceEqual(qs, [p2])
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
l1 = LeafA.objects.create(data="first")
self.assertSequenceEqual(LeafA.objects.all(), [l1])
self.assertSequenceEqual(
LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
[l1],
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(note=self.n1)
| ExtraInfo.objects.filter(info="e2")
).filter(note=self.n1),
[self.e1],
)
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(info="e2")
| ExtraInfo.objects.filter(note=self.n1)
).filter(note=self.n1),
[self.e1],
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(n1)
cls.ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
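        # Each iterator walks the shared result cache independently, so i2
        # starts from the first row even after i1 has already advanced.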
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), "<Tag: t1>")
self.assertEqual(repr(next(i1)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t1>")
self.assertEqual(repr(next(i2)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t3>")
self.assertEqual(repr(next(i1)), "<Tag: t3>")
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
# Nested queries should not evaluate the inner query as part of constructing the
# SQL (so we should see a nested query here, indicated by two "SELECT" calls).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertSequenceEqual(
PointerA.objects.filter(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
PointerA.objects.exclude(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
Tag.objects.exclude(children=None),
[self.t1, self.t3],
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertSequenceEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
[self.t1, self.t4, self.t5],
)
        # The annotation->tag link is single-valued and the tag->children link
        # is multi-valued. So we have to split the exclude filter in the
        # middle and then optimize the inner query without losing results.
self.assertSequenceEqual(
Annotation.objects.exclude(tag__children__name="t2"),
[self.ann2],
)
        # Nested queries are possible (although they should be used with care,
        # since they have performance problems on backends like MySQL).
self.assertSequenceEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
[self.ann1],
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by("name")
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name="foo")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
self.assertEqual(
Tag.objects.distinct().order_by("category__name")[:3].count(), 3
)
def test_multiple_columns_with_the_same_name_slice(self):
self.assertEqual(
list(
Tag.objects.order_by("name").values_list("name", "category__name")[:2]
),
[("t1", "Generic"), ("t2", "Generic")],
)
self.assertSequenceEqual(
Tag.objects.order_by("name").select_related("category")[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by("-name").values_list("name", "parent__name")[:2]),
[("t5", "t3"), ("t4", "t3")],
)
self.assertSequenceEqual(
Tag.objects.order_by("-name").select_related("parent")[:2],
[self.t5, self.t4],
)
def test_col_alias_quoted(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertEqual(
Tag.objects.values("parent")
.annotate(
tag_per_parent=Count("pk"),
)
.aggregate(Max("tag_per_parent")),
{"tag_per_parent__max": 2},
)
sql = captured_queries[0]["sql"]
self.assertIn("AS %s" % connection.ops.quote_name("col1"), sql)
def test_xor_subquery(self):
self.assertSequenceEqual(
Tag.objects.filter(
Exists(Tag.objects.filter(id=OuterRef("id"), name="t3"))
^ Exists(Tag.objects.filter(id=OuterRef("id"), parent=self.t1))
),
[self.t2],
)
class RawQueriesTests(TestCase):
@classmethod
def setUpTestData(cls):
Note.objects.create(note="n1", misc="foo", id=1)
def test_ticket14729(self):
        # Test the representation of a raw query with parameters passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ["n1"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"
)
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ["n1", "foo"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs),
"<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>",
)
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty iterator as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
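        # The names below deliberately contain "_" and "%", which are LIKE
        # wildcards; on backends that implement the case-insensitive lookups
        # with LIKE, these characters must be escaped to match literally.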
item_ab = Item.objects.create(
name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
item_xy = Item.objects.create(
name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="x%Y"),
[item_xy],
)
self.assertSequenceEqual(
Item.objects.filter(name__istartswith="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iendswith="A_b"),
[item_ab],
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # OK - so the exists query worked - but did it include too many columns?
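        # exists() is expected to select a minimal constant (roughly
        # "SELECT 1 ... LIMIT 1") instead of the model's columns, which is
        # what the quoted-name checks below verify.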
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"]
id, name = connection.ops.quote_name("id"), connection.ops.quote_name("name")
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_distinct_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct().exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(connection.ops.quote_name("id"), captured_sql)
self.assertNotIn(connection.ops.quote_name("name"), captured_sql)
def test_sliced_distinct_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct()[1:3].exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertIn(connection.ops.quote_name("id"), captured_sql)
self.assertIn(connection.ops.quote_name("name"), captured_sql)
def test_ticket_18414(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_18414_distinct_on(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct("name").exists())
self.assertTrue(Article.objects.distinct("name")[1:2].exists())
self.assertFalse(Article.objects.distinct("name")[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
    Tests for the QuerySet.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.order_by("id").ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count("notes"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("num_notes").ordered, True)
def test_annotated_default_ordering(self):
qs = Tag.objects.annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
def test_annotated_values_default_ordering(self):
qs = Tag.objects.values("name").annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name="first")
NamedCategory.objects.create(id=2, name="second")
NamedCategory.objects.create(id=3, name="third")
NamedCategory.objects.create(id=4, name="fourth")
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)
self.assertEqual(set(query.values_list("id", flat=True)), {1, 2})
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:3]
)[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related objects constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
t2 = Tag.objects.create(name="t2", category=generic)
ManagedModel.objects.create(data="mm1", tag=t1, public=True)
mm2 = ManagedModel.objects.create(data="mm2", tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by("-id")[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:1]
).delete()
self.assertEqual(
set(DumbCategory.objects.values_list("id", flat=True)), {1, 2, 3}
)
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {1, 3})
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by("name")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["first", "fourth"],
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct()
.order_by("-name")
.values("id")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["second", "third"],
)
# Annotated value.
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(double_id=F("id") * 2)
.order_by("id")
.distinct()
.values("double_id")[0:2],
)
.order_by("id")
.values_list("id", flat=True),
[2, 4],
)
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.school = School.objects.create()
cls.room_1 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 1"
)
cls.room_2 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 2"
)
cls.room_3 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 3"
)
cls.room_4 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 4"
)
tag = Tag.objects.create()
cls.annotation_1 = Annotation.objects.create(tag=tag)
annotation_2 = Annotation.objects.create(tag=tag)
note = cls.annotation_1.notes.create(tag=tag)
cls.base_user_1 = BaseUser.objects.create(annotation=cls.annotation_1)
cls.base_user_2 = BaseUser.objects.create(annotation=annotation_2)
cls.task = Task.objects.create(
owner=cls.base_user_2,
creator=cls.base_user_2,
note=note,
)
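    # Combining with a sliced queryset requires rewriting the slice as a
    # subquery inside IN (...), which not every backend supports - hence the
    # skipUnlessDBFeature("allow_sliced_subqueries_with_in") guards below.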
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_3, self.room_4])
def test_subquery_aliases(self):
combined = School.objects.filter(pk__isnull=False) & School.objects.filter(
Exists(
Classroom.objects.filter(
has_blackboard=True,
school=OuterRef("pk"),
)
),
)
self.assertSequenceEqual(combined, [self.school])
nested_combined = School.objects.filter(pk__in=combined.values("pk"))
self.assertSequenceEqual(nested_combined, [self.school])
def test_conflicting_aliases_during_combine(self):
qs1 = self.annotation_1.baseuser_set.all()
qs2 = BaseUser.objects.filter(
Q(owner__note__in=self.annotation_1.notes.all())
| Q(creator__note__in=self.annotation_1.notes.all())
)
self.assertSequenceEqual(qs1, [self.base_user_1])
self.assertSequenceEqual(qs2, [self.base_user_2])
self.assertCountEqual(qs2 | qs1, qs1 | qs2)
self.assertCountEqual(qs2 | qs1, [self.base_user_1, self.base_user_2])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"""
If a queryset is already evaluated, it can still be used as a query arg.
"""
n = Note(note="Test1", misc="misc")
n.save()
e = ExtraInfo(info="good", note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Make one of the cached results unpicklable.
n_list._result_cache[0].lock = Lock()
with self.assertRaises(TypeError):
pickle.dumps(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good")
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model options shouldn't be cloned."
)
try:
Note.objects.filter(pk__lte=F("pk") + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model fields shouldn't be cloned"
)
try:
Note.objects.filter(note=F("misc")).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values("num").order_by("num"), [])
def test_values_subquery(self):
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")), []
)
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), []
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(1, 2),
)
qs = qs.order_by("value_minus_x")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"}
)
qs = qs.order_by("value_minus_one").order_by("value_plus_one")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_multiple(self):
# Postgres doesn't allow constants in order by, so check for that.
qs = Number.objects.extra(
select={
"value_plus_one": "num+1",
"value_minus_one": "num-1",
"constant_value": "1",
}
)
qs = qs.order_by("value_plus_one", "value_minus_one", "constant_value")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"},
order_by=["value_minus_one"],
)
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_select_params_values_order_in_extra(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s"},
select_params=[1],
order_by=["value_plus_x"],
)
qs = qs.filter(num=72)
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_multiple_select_params_values_order_by(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(72, 72),
)
qs = qs.order_by("value_minus_x")
qs = qs.filter(num=1)
qs = qs.values("num")
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num")
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = (
"Cannot resolve keyword %r into field. Join on 'name' not permitted."
% "foo"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list("name__foo")
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list("num", flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={"1": "num+1"}).values_list(
"1", named=True
).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list("num", "num2", named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(values._fields, ("num", "num2"))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(
values._fields,
("num2", "id", "num", "other_num", "another_num", "id__count"),
)
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count("id")
values = (
Number.objects.annotate(id__count1=expr)
.values_list(expr, "id__count1", named=True)
.first()
)
self.assertEqual(values._fields, ("id__count2", "id__count1"))
def test_named_values_list_expression(self):
expr = F("num") + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(
expr, "combinedexpression1", named=True
)
values = qs.first()
self.assertEqual(values._fields, ("combinedexpression2", "combinedexpression1"))
def test_named_values_pickle(self):
value = Number.objects.values_list("num", "other_num", named=True).get()
self.assertEqual(value, (72, None))
self.assertEqual(pickle.loads(pickle.dumps(value)), value)
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
cls.articles = [
Article.objects.create(name=f"Article {i}", created=some_date)
for i in range(1, 8)
]
def get_ordered_articles(self):
return Article.objects.order_by("name")
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, "Article 1")
self.assertSequenceEqual(
self.get_ordered_articles()[1:3],
[self.articles[1], self.articles[2]],
)
def test_slicing_with_steps_can_be_used(self):
self.assertSequenceEqual(
self.get_ordered_articles()[::2],
[
self.articles[0],
self.articles[2],
self.articles[4],
self.articles[6],
],
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_steps_is_not_lazy(self):
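        # SQL has no notion of a slice step, so a stepped slice has to fetch
        # the underlying range immediately (one query) and apply the step in
        # Python.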
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][0:2],
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][4:], [self.articles[4]]
)
self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertSequenceEqual(
self.get_ordered_articles()[2:][0:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][2:3], [self.articles[4]]
)
# Using an offset without a limit is also possible.
self.assertSequenceEqual(
self.get_ordered_articles()[5:],
[self.articles[5], self.articles[6]],
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
msg = "Cannot filter a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
msg = "Cannot reorder a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].order_by("id")
def test_slicing_cannot_combine_queries_once_sliced(self):
msg = "Cannot combine queries once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[0:-5]
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1:]
def test_invalid_index(self):
msg = "QuerySet indices must be integers or slices, not str."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()["foo"]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact="Article 1")), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact="Article 1")
s2 = Article.objects.filter(name__exact="Article 2")
self.assertSequenceEqual(
(s1 | s2).order_by("name"),
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
Article.objects.create(name="three", created=datetime.datetime.now())
Article.objects.create(name="four", created=datetime.datetime.now())
food = Food.objects.create(name="spam")
Eaten.objects.create(meal="spam with eggs", food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[:0].latest("created")
def test_empty_resultset_sql(self):
# ticket #12192
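        # A slice like [1:1] is provably empty, so no database round trip
        # should be needed to produce the (empty) result.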
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(
Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0
)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(
Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1
)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
r_a = ReservedName.objects.create(name="a", order=42)
r_b = ReservedName.objects.create(name="b", order=37)
self.assertSequenceEqual(
ReservedName.objects.order_by("order"),
[r_b, r_a],
)
self.assertSequenceEqual(
ReservedName.objects.extra(
select={"stuff": "name"}, order_by=("order", "stuff")
),
[r_b, r_a],
)
class ToFieldTests(TestCase):
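    # Eaten.food is declared with to_field="name" in this test app's models,
    # so the related lookups below resolve against Food.name rather than
    # Food.pk.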
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
{lunch},
)
self.assertEqual(
set(
Eaten.objects.filter(
food__in=Food.objects.filter(name="apple").values("eaten__meal")
)
),
set(),
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
{apple},
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        dinner_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])), {apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(node=node2)), [node1])
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name="pk")
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(
Related.objects.filter(custom__isnull=False), [notnull]
)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
e1 = Eaten.objects.create(food=apple, meal="lunch")
e2 = Eaten.objects.create(meal="lunch")
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=False),
[e1],
)
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=True),
[e2],
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
Tag.objects.create(name="t2", parent=t1, category=generic)
t3 = Tag.objects.create(name="t3", parent=t1)
Tag.objects.create(name="t4", parent=t3)
Tag.objects.create(name="t5", parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by("parent")), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(LoopX.objects.order_by("y__x__y__x__id"), [])
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
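    # e.g. the compiled SQL should contain a single trailing
    # "... GROUP BY parent_id ORDER BY NULL" and no other ORDER BY clause.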
@skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping")
def test_null_ordering_added(self):
query = Tag.objects.values_list("parent_id", flat=True).order_by().query
query.group_by = ["parent_id"]
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
# The numbers amount is picked to force three different IN batches
# for Oracle, yet to be less than 2100 parameter limit for MSSQL.
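        # Oracle caps IN (...) lists at 1000 elements, so 2050 values are
        # split into three OR-ed IN clauses.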
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(
Number.objects.filter(num__in=numbers[:number]).count(), number
)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ["one", "two", "three"]
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [
("un", 1, objectas[0]),
("deux", 2, objectas[0]),
("trois", 3, objectas[2]),
]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux", objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name="deux")
Q2 = Q(objectc__objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__name="ein")
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__objectb__name="trois")
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name="one", objectc__objecta__name="two")
Q2 = Q(
objecta__objectc__name="ein",
objectc__objecta__name="three",
objecta__objectb__name="trois",
)
self.check_union(ObjectB, Q1, Q2)
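# A minimal sketch, not part of the original suite, of the invariant that
# check_union() above verifies: ORing two querysets with | is equivalent to
# filtering on the ORed Q objects, whichever order the operands come in.
def _sketch_union_invariant(model, q1, q2):
    combined = model.objects.filter(q1) | model.objects.filter(q2)
    ored = model.objects.filter(q1 | q2)
    # Compare as sets because the two forms may order rows differently.
    return set(combined) == set(ored)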
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
        Can create an instance of a model with only the PK field (#17056).
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name="apples")
cls.f2 = Food.objects.create(name="oranges")
Eaten.objects.create(food=f1, meal="dinner")
cls.j1 = Job.objects.create(name="Manager")
cls.r1 = Responsibility.objects.create(description="Playing golf")
cls.j2 = Job.objects.create(name="Programmer")
cls.r2 = Responsibility.objects.create(description="Programming")
JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1)
JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2)
def test_to_field(self):
self.assertSequenceEqual(
Food.objects.exclude(eaten__meal="dinner"),
[self.f2],
)
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Playing golf"),
[self.j2],
)
self.assertSequenceEqual(
Responsibility.objects.exclude(jobs__name="Manager"),
[self.r2],
)
def test_ticket14511(self):
alex = Person.objects.get_or_create(name="Alex")[0]
jane = Person.objects.get_or_create(name="Jane")[0]
oracle = Company.objects.get_or_create(name="Oracle")[0]
google = Company.objects.get_or_create(name="Google")[0]
microsoft = Company.objects.get_or_create(name="Microsoft")[0]
intel = Company.objects.get_or_create(name="Intel")[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(
employee=employee, employer=employer, title=title
)
employ(oracle, alex, "Engineer")
employ(oracle, alex, "Developer")
employ(google, alex, "Engineer")
employ(google, alex, "Manager")
employ(microsoft, alex, "Manager")
employ(intel, alex, "Manager")
employ(microsoft, jane, "Developer")
employ(intel, jane, "Manager")
alex_tech_employers = (
alex.employers.filter(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = (
alex.employers.exclude(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
def test_exclude_reverse_fk_field_ref(self):
tag = Tag.objects.create()
Note.objects.create(tag=tag, note="note")
annotation = Annotation.objects.create(name="annotation", tag=tag)
self.assertEqual(
Annotation.objects.exclude(tag__note__note=F("name")).get(), annotation
)
def test_exclude_with_circular_fk_relation(self):
self.assertEqual(
ObjectB.objects.exclude(objecta__objectb__name=F("name")).count(), 0
)
def test_subquery_exclude_outerref(self):
qs = JobResponsibilities.objects.filter(
Exists(Responsibility.objects.exclude(jobs=OuterRef("job"))),
)
self.assertTrue(qs.exists())
self.r1.delete()
self.assertFalse(qs.exists())
def test_exclude_nullable_fields(self):
number = Number.objects.create(num=1, other_num=1)
Number.objects.create(num=2, other_num=2, another_num=2)
self.assertSequenceEqual(
Number.objects.exclude(other_num=F("another_num")),
[number],
)
self.assertSequenceEqual(
Number.objects.exclude(num=F("another_num")),
[number],
)
def test_exclude_multivalued_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Programming"),
[self.j1],
)
self.assertIn("exists", captured_queries[0]["sql"].lower())
def test_exclude_subquery(self):
subquery = JobResponsibilities.objects.filter(
responsibility__description="bar",
) | JobResponsibilities.objects.exclude(
job__responsibilities__description="foo",
)
self.assertCountEqual(
Job.objects.annotate(
                responsibility=subquery.filter(job=OuterRef("name")).values(
                    "id"
                )[:1]
),
[self.j1, self.j2],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_exclude_unsaved_o2o_object(self):
jack = Staff.objects.create(name="jack")
jack_staff = StaffUser.objects.create(staff=jack)
unsaved_object = Staff(name="jane")
self.assertIsNone(unsaved_object.pk)
self.assertSequenceEqual(
StaffUser.objects.exclude(staff=unsaved_object), [jack_staff]
)
def test_exclude_unsaved_object(self):
# These tests will catch ValueError in Django 5.0 when passing unsaved
# model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
company = Company.objects.create(name="Django")
msg = "Passing unsaved model instances to related filters is deprecated."
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.exclude(staff=Staff(name="unsaved"))
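# A minimal sketch, not part of the original suite, showing how to check -
# without executing the query - that excluding on a multi-valued relation
# compiles to a subquery (an EXISTS clause on most backends), which is what
# test_exclude_multivalued_exists() above asserts via CaptureQueriesContext.
def _sketch_exclude_compiles_to_subquery():
    qs = Job.objects.exclude(responsibilities__description="Programming")
    return "EXISTS" in str(qs.query).upper()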
class ExcludeTest17600(TestCase):
"""
    Regression tests for ticket #17600. Some of these likely duplicate
other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1),
[self.o3],
)
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1).distinct(),
[self.o3],
)
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
[self.o3],
)
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)),
[self.o3],
)
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        orders that have no items at all. The correct way to write this
        query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
[self.o1],
)
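# A minimal sketch, not part of the original suite, of one way to express
# in raw SQL the query the expected-failure docstring above describes:
# orders whose items ALL have status 1 (or that have no items). The table
# and column names are assumptions based on this suite's default model
# naming.
_SKETCH_ALL_ITEMS_STATUS_1_SQL = """
SELECT o.id
FROM queries_order o
WHERE NOT EXISTS (
    -- No item of this order may have a status other than 1.
    SELECT 1
    FROM queries_orderitem oi
    WHERE oi.order_id = o.id AND oi.status <> 1
)
"""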
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F("second__onetoonecategory")
).get(),
rel,
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name="i1")
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
["i1", none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i1"]),
[none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i3"]),
["i1", none_val],
attrgetter("name"),
)
inner_qs = NullableName.objects.filter(name="i1").values_list("name")
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val],
attrgetter("name"),
)
        # The inner queryset wasn't executed - it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]), ["i1"], attrgetter("name")
)
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name="i1"))),
list(NullableName.objects.filter(Q(name="i1"))),
)
self.assertNotIn(
"IS NOT NULL", str(NullableName.objects.filter(~~Q(name="i1")).query)
)
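# A minimal sketch, not part of the original suite, of why the expected
# failure above is hard to fix: under SQL's three-valued logic a comparison
# with NULL yields UNKNOWN, so "col NOT IN (list containing NULL)" is never
# TRUE and filters out every row. The table name is an assumption based on
# this suite's default model naming.
_SKETCH_NOT_IN_WITH_NULL_SQL = """
-- Returns no rows at all, not even the one with name = 'i1':
SELECT * FROM queries_nullablename WHERE name NOT IN (NULL);
"""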
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name="")
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_21001(self):
foo = NamedCategory.objects.create(name="foo")
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=""), [foo.pk], attrgetter("pk")
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
def as_sql(self, compiler, connection):
return "dummy", []
class MockCompiler:
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ("(dummy AND dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy AND dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector=OR)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("(dummy OR dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy OR dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("dummy", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy)", []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = OR
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=AND)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
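# A minimal sketch, not part of the original suite, summarizing the rules
# the tests above exercise: a child matching nothing empties an ANDed node
# (EmptyResultSet) but is simply dropped from an ORed one, and negating an
# empty node yields a full match, compiled as ("", []).
def _sketch_negated_nothing_matches_everything():
    compiler = WhereNodeTest.MockCompiler()
    node = WhereNode(children=[NothingNode()], connector=OR)
    node.negate()
    # NOT (nothing) matches everything, so no WHERE fragment is emitted.
    return node.as_sql(compiler, connection)  # -> ("", [])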
class QuerySetExceptionTests(SimpleTestCase):
def test_invalid_order_by(self):
msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name"
with self.assertRaisesMessage(FieldError, msg):
Article.objects.order_by("*")
def test_invalid_order_by_raw_column_alias(self):
msg = (
"Cannot resolve keyword 'queries_author.name' into field. Choices "
"are: cover, created, creator, creator_id, id, modified, name, "
"note, note_id, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Item.objects.values("creator__name").order_by("queries_author.name")
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name="foo")
d2 = ModelD.objects.create(name="bar")
cls.a1 = ModelA.objects.create(name="a1", d=cls.d1)
c = ModelC.objects.create(name="c")
b = ModelB.objects.create(name="b", c=c)
cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2)
def test_ticket_17886(self):
        # The first Q-object generates the match; the rest of the filters
        # should not remove the match even if they match nothing. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates a join to c, which the ORM tried to promote
        # but failed as that join isn't nullable.
q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo")
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count("INNER JOIN"), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note="n", misc="m")
e = ExtraInfo.objects.create(info="info", note=n)
a = Author.objects.create(name="Author1", num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name="Foo", creator=a)
r2 = Report.objects.create(name="Bar")
Report.objects.create(name="Bar", creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count(" JOIN "), 2)
self.assertSequenceEqual(qs.order_by("name"), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
i3 = Identifier.objects.create(name="i3")
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(
Identifier.objects.filter(program=None, channel=None), [i3]
)
self.assertSequenceEqual(
Identifier.objects.exclude(program=None, channel=None).order_by("name"),
[i1, i2],
)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
        # Check that ~~Q() (or equivalently .exclude(~Q())) works like Q()
        # for join promotion.
qs1_doubleneg = Identifier.objects.exclude(
~Q(program__id=p1.id, channel__id=c1.id)
).order_by("pk")
qs1_filter = Identifier.objects.filter(
program__id=p1.id, channel__id=c1.id
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Test OR + doubleneg. The expected result is that channel is LOUTER
# joined, program INNER joined
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by("pk")
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed
        # to the lowest level of the boolean tree, and another where this
        # isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by("pk")
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN"))
self.assertEqual(0, str(qs1.query).count("INNER JOIN"))
self.assertEqual(
str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN")
)
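# A minimal sketch, not part of the original suite, of the join-counting
# idiom the promotion tests above repeat: inspect the generated SQL to see
# which joins were promoted to LEFT OUTER and which stayed INNER.
def _sketch_join_counts(qs):
    sql = str(qs.query)
    return {
        "inner": sql.count("INNER JOIN"),
        "louter": sql.count("LEFT OUTER JOIN"),
    }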
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn("INNER JOIN", str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name="foo").select_related("tag")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name="foo").filter(member__name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name="r4").filter(report__name="r1")
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
ORM detects it and removes unnecessary joins. The set of reusable joins
        is updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
springfield_elementary = School.objects.create()
hogward = School.objects.create()
Student.objects.create(school=springfield_elementary)
hp = Student.objects.create(school=hogward)
Classroom.objects.create(school=hogward, name="Potion")
Classroom.objects.create(school=springfield_elementary, name="Main")
qs = Student.objects.filter(
~(
Q(school__classroom__name="Main")
& Q(school__classroom__has_blackboard=None)
)
)
self.assertSequenceEqual(qs, [hp])
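# A minimal sketch, not part of the original suite, contrasting the reuse
# rules shown above: chained filters on a direct FK reuse a single join,
# while chained filters on a reverse FK must each get their own join so the
# two conditions can match different related rows.
def _sketch_join_reuse_contrast():
    fk_joins = str(
        Annotation.objects.filter(tag__name="foo").filter(tag__name="bar").query
    ).count("JOIN")
    revfk_joins = str(
        Author.objects.filter(report__name="r4").filter(report__name="r1").query
    ).count("JOIN")
    return fk_joins, revfk_joins  # Expected: (1, 2).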
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1="f1", f2="f2")
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(" JOIN "), 0)
qs = qs.select_related("a", "b")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 0)
self.assertEqual(str(qs.query).count(" LEFT OUTER JOIN "), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # The ANDed a__f2 filter lets us keep using INNER JOIN even inside
        # the ORed case. If the join to a returns no rows, the ANDed filter
        # for a__f2 can't be true.
qs = qs.filter(Q(a__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f2="foo")).filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = qs.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") | (Q(b__f1="foo") & Q(a__f1="bar")))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(
(Q(a__f1="foo") | Q(b__f1="foo")) & (Q(a__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
qs = BaseA.objects.filter(
Q(a__f1="foo") | Q(a__f1="bar") & (Q(b__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F("b__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(
Q(a__f1=F("b__f1")) | Q(a__f2=F("b__f2")) | Q(c__f1="foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
i_extra = Identifier.objects.create(name="extra")
i_program = Identifier.objects.create(name="program")
program = Program.objects.create(identifier=i_program)
i_channel = Identifier.objects.create(name="channel")
channel = Channel.objects.create(identifier=i_channel)
channel.programs.add(program)
        # The channel contains the program, so all Identifiers except the
        # program's should be returned.
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=channel).order_by("name"),
[i_channel, i_extra],
)
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=None).order_by("name"),
[i_program],
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text="pg3")
pg2 = Page.objects.create(text="pg2")
pg1 = Page.objects.create(text="pg1")
pa1 = Paragraph.objects.create(text="pa1")
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text="pa2")
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text="pa3")
ch1 = Chapter.objects.create(title="ch1", paragraph=pa1)
ch2 = Chapter.objects.create(title="ch2", paragraph=pa2)
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3)
b1 = Book.objects.create(title="b1", chapter=ch1)
b2 = Book.objects.create(title="b2", chapter=ch2)
b3 = Book.objects.create(title="b3", chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text="pg1")
self.assertNotIn("IS NOT NULL", str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data="foo")
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data="bar", parent=my1)
parents = MyObject.objects.filter(parent=F("id"))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F("id"))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
        Tests ORing two QuerySets together when one of them involves an
        exclude() subquery.
"""
t = Tag.objects.create(name="foo")
a1 = Annotation.objects.create(tag=t, name="a1")
a2 = Annotation.objects.create(tag=t, name="a2")
a3 = Annotation.objects.create(tag=t, name="a3")
n = Note.objects.create(note="foo", misc="bar")
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name="")
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn("LEFT OUTER JOIN", str(qs.query))
else:
self.assertNotIn("LEFT OUTER JOIN", str(qs.query))
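# A minimal sketch, not part of the original suite, of the backend switch
# tested above: on backends that interpret '' as NULL (Oracle), filtering
# on single__name="" must also match NULL rows, so the join is promoted to
# LEFT OUTER; elsewhere a plain INNER JOIN suffices.
def _sketch_expected_join_for_empty_string():
    if connection.features.interprets_empty_strings_as_nulls:
        return "LEFT OUTER JOIN"
    return "INNER JOIN"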
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(
Order.objects.filter(items__in=OrderItem.objects.values_list("status")),
[o1],
)
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data="foo")
lfa2 = LeafA.objects.create(data="bar")
lfb1 = LeafB.objects.create(data="lfb1")
lfb2 = LeafB.objects.create(data="lfb2")
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = (
Item.objects.exclude(note__isnull=False)
.filter(name="something", creator__extra__isnull=True)
.order_by()
)
self.assertEqual(1, str(qs.query).count("INNER JOIN"))
self.assertEqual(0, str(qs.query).count("OUTER JOIN"))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
cls.c = ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
        # Passing an object of the class on which the query is done.
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)
):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = [self.oa]
out_b = [self.ob, self.pob1]
out_c = [self.c]
# proxy model objects
self.assertSequenceEqual(
ObjectB.objects.filter(objecta=self.poa).order_by("name"), out_b
)
self.assertSequenceEqual(
ObjectA.objects.filter(objectb__in=self.pob).order_by("pk"), out_a * 2
)
# child objects
self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by("name"),
out_b,
)
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by(
"name"
),
out_b,
)
# parent objects
self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
ValueQuerySets are not checked for compatibility with the lookup field.
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name="ob")
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name="pob")
pob.num = pob.objecta.pk
pob.save()
self.assertSequenceEqual(
ObjectB.objects.filter(
objecta__in=ObjectB.objects.values_list("num")
).order_by("pk"),
[ob, pob],
)
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data="s1")
s2 = SharedConnection.objects.create(data="s2")
s3 = SharedConnection.objects.create(data="s3")
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3]
)
self.assertSequenceEqual(
SharedConnection.objects.order_by("-pointera__connection", "pk"),
expected_ordering,
)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name="jackstaff")
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name="jillstaff")
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
"creator__staffuser__staff", "owner__staffuser__staff"
)
self.assertEqual(str(qs.query).count(" JOIN "), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(
task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff,
)
self.assertEqual(
task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff,
)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related("parent").defer("parent__created")
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(" INNER JOIN ", str(qs.query))
qs = qs.values("parent__parent__id")
self.assertIn(" INNER JOIN ", str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values("parent__parent__id")
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values("objecta__name")
self.assertIn(" INNER JOIN ", str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1")
sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2")
sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3")
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(
SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"),
[sc2, sc3],
)
self.assertSequenceEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]
)
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id="cpt1", tag="cpt1")
cp1 = CustomPk.objects.create(name="cp1", extra="extra")
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F("school")))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1
)
complex_q = Q(
pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000
/ F("ticket23605b__modelc_fk__field_c0")
)
&
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True)
& ~Q(
ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
# Same filters as above commented filters, but
# double-negated (one for Q() above, one for
# parentheses). So, again a1 match, a2 not.
Q(field_b1=True)
& Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
)
)
).filter(ticket23605b__field_b1=True)
)
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
    def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "Field 'id' expected a number but got 'abc'."
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag="abc")
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, "abc"])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(
Individual.objects.filter(
Q(alive=False), Q(related_individual__isnull=True)
),
[i4],
)
self.assertSequenceEqual(
Individual.objects.exclude(
Q(alive=False), Q(related_individual__isnull=True)
).order_by("pk"),
[i1, i2, i3],
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_23622(self):
"""
Make sure __pk__in and __in work the same for related fields when
using a distinct on subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = Q(
ticket23605b__pk__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
qy = Q(
ticket23605b__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)),
set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)),
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
|
c4a2065061236fc53c1332d659d816ba6d0bfbdaeb7decbf99c46ee5dc1c8881 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import IntegrityError, models, transaction
from django.test import TestCase
from django.utils.translation import gettext_lazy
from .models import (
Article,
Category,
Child,
ChildNullableParent,
ChildStringPrimaryKeyParent,
City,
Country,
District,
First,
Parent,
ParentStringPrimaryKey,
Record,
Relation,
Reporter,
School,
Student,
Third,
ToFieldChild,
)
class ManyToOneTests(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Reporters.
cls.r = Reporter(first_name="John", last_name="Smith", email="[email protected]")
cls.r.save()
cls.r2 = Reporter(
first_name="Paul", last_name="Jones", email="[email protected]"
)
cls.r2.save()
# Create an Article.
cls.a = Article(
headline="This is a test",
pub_date=datetime.date(2005, 7, 27),
reporter=cls.r,
)
cls.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
        self.assertEqual((r.first_name, r.last_name), ("John", "Smith"))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
msg = (
"<Article: Paul's story> instance isn't saved. Use bulk=False or save the "
"object first."
)
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with self.assertRaisesMessage(
TypeError, "'Article' instance expected, got <Reporter:"
):
self.r.article_set.add(self.r2)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
def test_set(self):
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
new_article2 = self.r2.article_set.create(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertSequenceEqual(self.r.article_set.all(), [self.a])
self.assertSequenceEqual(
self.r2.article_set.all(),
[new_article, new_article2],
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"prohibited. Use article_set.set() instead."
)
with self.assertRaisesMessage(TypeError, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
new_article2 = self.r2.article_set.create(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertSequenceEqual(self.r.article_set.all(), [self.a])
self.assertSequenceEqual(
self.r2.article_set.all(),
[new_article, new_article2],
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, "remove"))
self.assertFalse(hasattr(self.r2.article_set, "clear"))
def test_assign_fk_id_value(self):
parent = Parent.objects.create(name="jeff")
child1 = Child.objects.create(name="frank", parent=parent)
child2 = Child.objects.create(name="randy", parent=parent)
parent.bestchild = child1
parent.save()
parent.bestchild_id = child2.pk
parent.save()
self.assertEqual(parent.bestchild_id, child2.pk)
self.assertFalse(Parent.bestchild.is_cached(parent))
self.assertEqual(parent.bestchild, child2)
self.assertTrue(Parent.bestchild.is_cached(parent))
        # Reassigning the same value doesn't clear the cached instance.
parent.bestchild_id = child2.pk
self.assertTrue(Parent.bestchild.is_cached(parent))
def test_assign_fk_id_none(self):
parent = Parent.objects.create(name="jeff")
child = Child.objects.create(name="frank", parent=parent)
parent.bestchild = child
parent.save()
parent.bestchild_id = None
parent.save()
self.assertIsNone(parent.bestchild_id)
self.assertFalse(Parent.bestchild.is_cached(parent))
self.assertIsNone(parent.bestchild)
self.assertTrue(Parent.bestchild.is_cached(parent))
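    # A minimal sketch, not part of the original suite, of the descriptor
    # caching rules the two tests above rely on: assigning an instance
    # populates the cache, while assigning a different id invalidates it.
    def _sketch_descriptor_cache(self):
        parent = Parent.objects.create(name="cache-demo")
        child = Child.objects.create(name="kid", parent=parent)
        parent.bestchild = child
        assert Parent.bestchild.is_cached(parent)  # Populated by assignment.
        parent.bestchild_id = None  # Changing the id clears the cache.
        assert not Parent.bestchild.is_cached(parent)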
def test_selects(self):
new_article1 = self.r.article_set.create(
headline="John's second story",
pub_date=datetime.date(2005, 7, 29),
)
new_article2 = self.r2.article_set.create(
headline="Paul's story",
pub_date=datetime.date(2006, 1, 17),
)
# Reporter objects have access to their related Article objects.
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article1, self.a],
)
self.assertSequenceEqual(
self.r.article_set.filter(headline__startswith="This"), [self.a]
)
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertSequenceEqual(Article.objects.filter(id__exact=self.a.id), [self.a])
self.assertSequenceEqual(Article.objects.filter(pk=self.a.id), [self.a])
# Query on an article property
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="This"), [self.a]
)
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name__exact="John"),
[new_article1, self.a],
)
# Implied __exact also works
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name="John"),
[new_article1, self.a],
)
# Query twice over the related field.
self.assertSequenceEqual(
Article.objects.filter(
reporter__first_name__exact="John", reporter__last_name__exact="Smith"
),
[new_article1, self.a],
)
# The underlying query only makes one join when a related table is
# referenced twice.
queryset = Article.objects.filter(
reporter__first_name__exact="John", reporter__last_name__exact="Smith"
)
self.assertNumQueries(1, list, queryset)
self.assertEqual(
queryset.query.get_compiler(queryset.db).as_sql()[0].count("INNER JOIN"), 1
)
# The automatically joined table has a predictable name.
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name__exact="John").extra(
where=["many_to_one_reporter.last_name='Smith'"]
),
[new_article1, self.a],
)
# ... and should work fine with the string that comes out of
# forms.Form.cleaned_data.
self.assertQuerysetEqual(
(
Article.objects.filter(reporter__first_name__exact="John").extra(
where=["many_to_one_reporter.last_name='%s'" % "Smith"]
)
),
[new_article1, self.a],
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertSequenceEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__pk=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter=self.r),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[new_article1, new_article2, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[new_article1, new_article2, self.a],
)
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertSequenceEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name="John")
.values("pk")
.query
).distinct(),
[new_article1, self.a],
)
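    # A minimal sketch, not part of the original suite, of the values()/
    # .query idiom shown at the end of test_selects(): the inner queryset
    # is reduced to a single column so it can be embedded as a subquery.
    def _sketch_subquery_filter(self):
        inner = Reporter.objects.filter(first_name="John").values("pk")
        # Passing inner.query embeds the uncompiled subquery; passing the
        # queryset itself also works and is the more common spelling.
        return Article.objects.filter(reporter__in=inner.query)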
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = [self.r]
# Reporters can be queried
self.assertSequenceEqual(
Reporter.objects.filter(id__exact=self.r.id), john_smith
)
self.assertSequenceEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertSequenceEqual(
Reporter.objects.filter(first_name__startswith="John"), john_smith
)
# Reporters can query in opposite direction of ForeignKey definition
self.assertSequenceEqual(
Reporter.objects.filter(article__id__exact=self.a.id), john_smith
)
self.assertSequenceEqual(
Reporter.objects.filter(article__pk=self.a.id), john_smith
)
self.assertSequenceEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertSequenceEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
john_smith,
)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith
)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith
)
self.assertCountEqual(
Reporter.objects.filter(article__headline__startswith="T"),
[self.r, self.r],
)
self.assertSequenceEqual(
Reporter.objects.filter(article__headline__startswith="T").distinct(),
john_smith,
)
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith="T").count(), 2
)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith="T")
.distinct()
.count(),
1,
)
# Queries can go round in circles.
self.assertCountEqual(
Reporter.objects.filter(article__reporter__first_name__startswith="John"),
[self.r, self.r, self.r],
)
self.assertSequenceEqual(
Reporter.objects.filter(
article__reporter__first_name__startswith="John"
).distinct(),
john_smith,
)
self.assertSequenceEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
john_smith,
)
# Implied __exact also works.
self.assertSequenceEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith
)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {"reporter__first_name": "John", "reporter__last_name": "Smith"}
qs = (
Article.objects.filter(
reporter=self.r,
)
.distinct()
.order_by()
.values("reporter__first_name", "reporter__last_name")
)
self.assertEqual([d], list(qs))
def test_select_related(self):
# Article.objects.select_related().dates() works properly when there
# are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(
first_name="Mike", last_name="Royko", email="[email protected]"
)
r2 = Reporter.objects.create(
first_name="John", last_name="Kass", email="[email protected]"
)
Article.objects.create(
headline="First", pub_date=datetime.date(1980, 4, 23), reporter=r1
)
Article.objects.create(
headline="Second", pub_date=datetime.date(1980, 4, 23), reporter=r2
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "day")),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)],
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "month")),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)],
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "year")),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)],
)
def test_delete(self):
new_article1 = self.r.article_set.create(
headline="John's second story",
pub_date=datetime.date(2005, 7, 29),
)
new_article2 = self.r2.article_set.create(
headline="Paul's story",
pub_date=datetime.date(2006, 1, 17),
)
new_article3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
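# A string value is also accepted for reporter_id; it's coerced to the
# column's type.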
new_article4 = Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, their articles will be deleted.
self.assertSequenceEqual(
Article.objects.all(),
[new_article4, new_article1, new_article2, new_article3, self.a],
)
self.assertSequenceEqual(
Reporter.objects.order_by("first_name"),
[self.r, self.r2],
)
self.r2.delete()
self.assertSequenceEqual(
Article.objects.all(),
[new_article4, new_article1, new_article3, self.a],
)
self.assertSequenceEqual(Reporter.objects.order_by("first_name"), [self.r])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith="This").delete()
self.assertSequenceEqual(Reporter.objects.all(), [])
self.assertSequenceEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertSequenceEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[a2, self.a],
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(
Article.objects.get(
reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)
)
),
)
def test_deepcopy_and_circular_references(self):
# Regression for #12876 -- Model methods that include queries that
# recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name="Mike")
r2 = Reporter.objects.create(first_name="John")
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_gettext_lazy(self):
reporter = Reporter.objects.create(
first_name="John", last_name="Smith", email="[email protected]"
)
lazy = gettext_lazy("test")
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = str(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = (
"Cannot resolve keyword 'notafield' into field. Choices are: %s"
)
reporter_fields = ", ".join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list("reporter__notafield")
article_fields = ", ".join(
["EXTRA"] + sorted(f.name for f in Article._meta.get_fields())
)
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={"EXTRA": "EXTRA_SELECT"}).values_list(
"notafield"
)
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns exactly the same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._state.fields_cache["parent"]
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None doesn't fail at assignment time, even though
# Child.parent is null=False.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
msg = (
'Cannot assign "<First: First object (1)>": "Child.parent" must '
'be a "Parent" instance.'
)
with self.assertRaisesMessage(ValueError, msg):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name="xyzzy", parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name="xyzzy", parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = (
"save() prohibited to prevent data loss due to unsaved related object "
"'parent'."
)
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_save_parent_after_assign(self):
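# Saving the assigned (initially unsaved) category before the record
# propagates its pk, so category_id and the cached relation stay in
# sync without extra queries.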
category = Category(name="cats")
record = Record(category=category)
category.save()
record.save()
category.name = "dogs"
with self.assertNumQueries(0):
self.assertEqual(category.id, record.category_id)
self.assertEqual(category.name, record.category.name)
def test_save_nullable_fk_after_parent(self):
parent = Parent()
child = ChildNullableParent(parent=parent)
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
def test_save_nullable_fk_after_parent_with_to_field(self):
parent = Parent(name="jeff")
child = ToFieldChild(parent=parent)
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
self.assertEqual(child.parent_id, parent.name)
def test_save_fk_after_parent_with_non_numeric_pk_set_on_child(self):
parent = ParentStringPrimaryKey()
child = ChildStringPrimaryKeyParent(parent=parent)
child.parent.name = "jeff"
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
self.assertEqual(child.parent_id, parent.name)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name="Chicago")
District.objects.create(city=ch, name="Far South")
District.objects.create(city=ch, name="North")
ny = City.objects.create(name="New York", id=2**33)
District.objects.create(city=ny, name="Brooklyn")
District.objects.create(city=ny, name="Manhattan")
def test_fk_to_smallautofield(self):
us = Country.objects.create(name="United States")
City.objects.create(country=us, name="Chicago")
City.objects.create(country=us, name="New York")
uk = Country.objects.create(name="United Kingdom", id=2**11)
City.objects.create(country=uk, name="London")
City.objects.create(country=uk, name="Edinburgh")
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name="First")
c2 = Category.objects.create(name="Second")
c3 = Category.objects.create(name="Third")
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
rel = Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(
left__category__name__in=["First"], right__category__name__in=["Second"]
)
self.assertSequenceEqual(q1, [rel])
q2 = Category.objects.filter(
record__left_set__right__category__name="Second"
).order_by("name")
self.assertSequenceEqual(q2, [c1, c2])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
msg = 'Cannot assign "%r": "Child.parent" must be a "Parent" instance.' % c
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual("id", cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
Third.objects.create(name="Third 1")
Third.objects.create(name="Third 2")
th = Third(name="testing")
# The object isn't saved and the relation cannot be used.
msg = (
"'Third' instance needs to have a primary key value before this "
"relationship can be used."
)
with self.assertRaisesMessage(ValueError, msg):
th.child_set.count()
# The reverse foreign key manager can be created.
self.assertEqual(th.child_set.model, Third)
th.save()
# Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertSequenceEqual(School.objects.all(), [public_school])
self.assertEqual(public_student.school, public_school)
# Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
School._meta.base_manager_name = "objects"
School._meta._expire_cache()
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# (refs #21563).
self.assertFalse(hasattr(Article(), "reporter"))
def test_clear_after_prefetch(self):
c = City.objects.create(name="Musical City")
d = District.objects.create(name="Ladida", city=c)
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertSequenceEqual(city.districts.all(), [d])
city.districts.clear()
self.assertSequenceEqual(city.districts.all(), [])
def test_remove_after_prefetch(self):
c = City.objects.create(name="Musical City")
d = District.objects.create(name="Ladida", city=c)
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertSequenceEqual(city.districts.all(), [d])
city.districts.remove(d)
self.assertSequenceEqual(city.districts.all(), [])
def test_add_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
def test_set_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.set([d2])
self.assertSequenceEqual(city.districts.all(), [d2])
def test_add_then_remove_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
city.districts.remove(d2)
self.assertEqual(city.districts.count(), 1)
def test_cached_relation_invalidated_on_save(self):
"""
Model.save() invalidates stale ForeignKey relations after a primary key
assignment.
"""
self.assertEqual(self.a.reporter, self.r) # caches a.reporter
self.a.reporter_id = self.r2.pk
self.a.save()
self.assertEqual(self.a.reporter, self.r2)
def test_cached_foreign_key_with_to_field_not_cleared_by_save(self):
parent = Parent.objects.create(name="a")
child = ToFieldChild.objects.create(parent=parent)
with self.assertNumQueries(0):
self.assertIs(child.parent, parent)
def test_reverse_foreign_key_instance_to_field_caching(self):
parent = Parent.objects.create(name="a")
ToFieldChild.objects.create(parent=parent)
child = parent.to_field_children.get()
with self.assertNumQueries(0):
self.assertIs(child.parent, parent)
def test_add_remove_set_by_pk_raises(self):
usa = Country.objects.create(name="United States")
chicago = City.objects.create(name="Chicago")
msg = "'City' instance expected, got %s" % chicago.pk
with self.assertRaisesMessage(TypeError, msg):
usa.cities.add(chicago.pk)
with self.assertRaisesMessage(TypeError, msg):
usa.cities.remove(chicago.pk)
with self.assertRaisesMessage(TypeError, msg):
usa.cities.set([chicago.pk])
|
e2ec5fc7de3c57773f226876df9370837bcb722284c670cacf3dece6d35e79f1 | import datetime
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
Avg,
Case,
Count,
F,
IntegerField,
Max,
Min,
OuterRef,
Q,
RowRange,
Subquery,
Sum,
Value,
ValueRange,
When,
Window,
WindowFrame,
)
from django.db.models.fields.json import KeyTextTransform, KeyTransform
from django.db.models.functions import (
Cast,
CumeDist,
DenseRank,
ExtractYear,
FirstValue,
Lag,
LastValue,
Lead,
NthValue,
Ntile,
PercentRank,
Rank,
RowNumber,
Upper,
)
from django.db.models.lookups import Exact
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import Classification, Detail, Employee, PastEmployeeDepartment
@skipUnlessDBFeature("supports_over_clause")
class WindowFunctionTests(TestCase):
@classmethod
def setUpTestData(cls):
classification = Classification.objects.create()
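# Twelve employees across five departments; each bonus is derived as
# salary / 400.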
Employee.objects.bulk_create(
[
Employee(
name=e[0],
salary=e[1],
department=e[2],
hire_date=e[3],
age=e[4],
bonus=Decimal(e[1]) / 400,
classification=classification,
)
for e in [
("Jones", 45000, "Accounting", datetime.datetime(2005, 11, 1), 20),
(
"Williams",
37000,
"Accounting",
datetime.datetime(2009, 6, 1),
20,
),
("Jenson", 45000, "Accounting", datetime.datetime(2008, 4, 1), 20),
("Adams", 50000, "Accounting", datetime.datetime(2013, 7, 1), 50),
("Smith", 55000, "Sales", datetime.datetime(2007, 6, 1), 30),
("Brown", 53000, "Sales", datetime.datetime(2009, 9, 1), 30),
("Johnson", 40000, "Marketing", datetime.datetime(2012, 3, 1), 30),
("Smith", 38000, "Marketing", datetime.datetime(2009, 10, 1), 20),
("Wilkinson", 60000, "IT", datetime.datetime(2011, 3, 1), 40),
("Moore", 34000, "IT", datetime.datetime(2013, 8, 1), 40),
("Miller", 100000, "Management", datetime.datetime(2005, 6, 1), 40),
("Johnson", 80000, "Management", datetime.datetime(2005, 7, 1), 50),
]
]
)
employees = list(Employee.objects.order_by("pk"))
PastEmployeeDepartment.objects.bulk_create(
[
PastEmployeeDepartment(employee=employees[6], department="Sales"),
PastEmployeeDepartment(employee=employees[10], department="IT"),
]
)
def test_dense_rank(self):
tests = [
ExtractYear(F("hire_date")).asc(),
F("hire_date__year").asc(),
"hire_date__year",
]
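# Unlike Rank, DenseRank assigns consecutive ranks after ties,
# leaving no gaps.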
for order_by in tests:
with self.subTest(order_by=order_by):
qs = Employee.objects.annotate(
rank=Window(expression=DenseRank(), order_by=order_by),
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 2),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 3),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 4),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 4),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 4),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 5),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 6),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 7),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 7),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_department_salary(self):
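# Computes a per-department running total of salaries in hire-date order.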
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=[F("hire_date").asc()],
)
).order_by("department", "department_sum")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, 45000),
("Jenson", "Accounting", 45000, 90000),
("Williams", "Accounting", 37000, 127000),
("Adams", "Accounting", 50000, 177000),
("Wilkinson", "IT", 60000, 60000),
("Moore", "IT", 34000, 94000),
("Miller", "Management", 100000, 100000),
("Johnson", "Management", 80000, 180000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 78000),
("Smith", "Sales", 55000, 55000),
("Brown", "Sales", 53000, 108000),
],
lambda entry: (
entry.name,
entry.department,
entry.salary,
entry.department_sum,
),
)
def test_rank(self):
"""
Rank the employees based on the year they were hired. Since multiple
employees were hired in the same year, the resulting ranking contains
gaps.
"""
qs = Employee.objects.annotate(
rank=Window(
expression=Rank(),
order_by=F("hire_date__year").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 4),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 5),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 6),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 6),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 6),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 9),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 10),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 11),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 11),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_row_number(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted. Some backends, such as Oracle,
require an ordering clause in the Window expression.
"""
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
order_by=F("pk").asc(),
)
).order_by("pk")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_row_number_no_ordering(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted.
"""
# Add a default ordering for consistent results across databases.
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
)
).order_by("pk")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_avg_salary_department(self):
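# Ordering by the partition key makes every row in a partition a peer,
# so the default RANGE frame spans the whole partition and each row
# receives the department-wide average.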
qs = Employee.objects.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=F("department").asc(),
partition_by="department",
)
).order_by("department", "-salary", "name")
self.assertQuerysetEqual(
qs,
[
("Adams", 50000, "Accounting", 44250.00),
("Jenson", 45000, "Accounting", 44250.00),
("Jones", 45000, "Accounting", 44250.00),
("Williams", 37000, "Accounting", 44250.00),
("Wilkinson", 60000, "IT", 47000.00),
("Moore", 34000, "IT", 47000.00),
("Miller", 100000, "Management", 90000.00),
("Johnson", 80000, "Management", 90000.00),
("Johnson", 40000, "Marketing", 39000.00),
("Smith", 38000, "Marketing", 39000.00),
("Smith", 55000, "Sales", 54000.00),
("Brown", 53000, "Sales", 54000.00),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.avg_salary,
),
)
def test_lag(self):
"""
Annotate each employee with the next lowest salary in the employee's
department, i.e. the previous row when ordered by ascending salary.
None is returned for the employee with the lowest salary.
"""
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="salary", offset=1),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", F("salary").asc(), F("name").asc())
self.assertQuerysetEqual(
qs,
[
("Williams", 37000, "Accounting", None),
("Jenson", 45000, "Accounting", 37000),
("Jones", 45000, "Accounting", 45000),
("Adams", 50000, "Accounting", 45000),
("Moore", 34000, "IT", None),
("Wilkinson", 60000, "IT", 34000),
("Johnson", 80000, "Management", None),
("Miller", 100000, "Management", 80000),
("Smith", 38000, "Marketing", None),
("Johnson", 40000, "Marketing", 38000),
("Brown", 53000, "Sales", None),
("Smith", 55000, "Sales", 53000),
],
transform=lambda row: (row.name, row.salary, row.department, row.lag),
)
def test_lag_decimalfield(self):
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="bonus", offset=1),
partition_by=F("department"),
order_by=[F("bonus").asc(), F("name").asc()],
)
).order_by("department", F("bonus").asc(), F("name").asc())
self.assertQuerysetEqual(
qs,
[
("Williams", 92.5, "Accounting", None),
("Jenson", 112.5, "Accounting", 92.5),
("Jones", 112.5, "Accounting", 112.5),
("Adams", 125, "Accounting", 112.5),
("Moore", 85, "IT", None),
("Wilkinson", 150, "IT", 85),
("Johnson", 200, "Management", None),
("Miller", 250, "Management", 200),
("Smith", 95, "Marketing", None),
("Johnson", 100, "Marketing", 95),
("Brown", 132.5, "Sales", None),
("Smith", 137.5, "Sales", 132.5),
],
transform=lambda row: (row.name, row.bonus, row.department, row.lag),
)
def test_first_value(self):
qs = Employee.objects.annotate(
first_value=Window(
expression=FirstValue("salary"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
).order_by("department", "hire_date")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 45000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 45000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 60000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 38000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 55000),
],
lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.first_value,
),
)
def test_last_value(self):
qs = Employee.objects.annotate(
last_value=Window(
expression=LastValue("hire_date"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
(
"Adams",
"Accounting",
datetime.date(2013, 7, 1),
50000,
datetime.date(2013, 7, 1),
),
(
"Jenson",
"Accounting",
datetime.date(2008, 4, 1),
45000,
datetime.date(2008, 4, 1),
),
(
"Jones",
"Accounting",
datetime.date(2005, 11, 1),
45000,
datetime.date(2005, 11, 1),
),
(
"Williams",
"Accounting",
datetime.date(2009, 6, 1),
37000,
datetime.date(2009, 6, 1),
),
(
"Moore",
"IT",
datetime.date(2013, 8, 1),
34000,
datetime.date(2013, 8, 1),
),
(
"Wilkinson",
"IT",
datetime.date(2011, 3, 1),
60000,
datetime.date(2011, 3, 1),
),
(
"Miller",
"Management",
datetime.date(2005, 6, 1),
100000,
datetime.date(2005, 6, 1),
),
(
"Johnson",
"Management",
datetime.date(2005, 7, 1),
80000,
datetime.date(2005, 7, 1),
),
(
"Johnson",
"Marketing",
datetime.date(2012, 3, 1),
40000,
datetime.date(2012, 3, 1),
),
(
"Smith",
"Marketing",
datetime.date(2009, 10, 1),
38000,
datetime.date(2009, 10, 1),
),
(
"Brown",
"Sales",
datetime.date(2009, 9, 1),
53000,
datetime.date(2009, 9, 1),
),
(
"Smith",
"Sales",
datetime.date(2007, 6, 1),
55000,
datetime.date(2007, 6, 1),
),
],
transform=lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.last_value,
),
ordered=False,
)
def test_function_list_of_values(self):
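# A window annotation combined with values_list() must not introduce a
# GROUP BY clause, unlike an aggregate would.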
qs = (
Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
)
.values_list("name", "salary", "department", "hire_date", "lead")
.order_by("department", F("hire_date").asc(), F("name").desc())
)
self.assertNotIn("GROUP BY", str(qs.query))
self.assertSequenceEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
)
def test_min_department(self):
"""An alternative way to specify a query for FirstValue."""
qs = Employee.objects.annotate(
min_salary=Window(
expression=Min("salary"),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", "salary", "name")
self.assertQuerysetEqual(
qs,
[
("Williams", "Accounting", 37000, 37000),
("Jenson", "Accounting", 45000, 37000),
("Jones", "Accounting", 45000, 37000),
("Adams", "Accounting", 50000, 37000),
("Moore", "IT", 34000, 34000),
("Wilkinson", "IT", 60000, 34000),
("Johnson", "Management", 80000, 80000),
("Miller", "Management", 100000, 80000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 38000),
("Brown", "Sales", 53000, 53000),
("Smith", "Sales", 55000, 53000),
],
lambda row: (row.name, row.department, row.salary, row.min_salary),
)
def test_max_per_year(self):
"""
Find the maximum salary awarded in the same year as the
employee was hired, regardless of the department.
"""
qs = Employee.objects.annotate(
max_salary_year=Window(
expression=Max("salary"),
order_by=ExtractYear("hire_date").asc(),
partition_by=ExtractYear("hire_date"),
)
).order_by(ExtractYear("hire_date"), "salary")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, 2005, 100000),
("Johnson", "Management", 80000, 2005, 100000),
("Miller", "Management", 100000, 2005, 100000),
("Smith", "Sales", 55000, 2007, 55000),
("Jenson", "Accounting", 45000, 2008, 45000),
("Williams", "Accounting", 37000, 2009, 53000),
("Smith", "Marketing", 38000, 2009, 53000),
("Brown", "Sales", 53000, 2009, 53000),
("Wilkinson", "IT", 60000, 2011, 60000),
("Johnson", "Marketing", 40000, 2012, 40000),
("Moore", "IT", 34000, 2013, 50000),
("Adams", "Accounting", 50000, 2013, 50000),
],
lambda row: (
row.name,
row.department,
row.salary,
row.hire_date.year,
row.max_salary_year,
),
)
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(
cume_dist=Window(
expression=CumeDist(),
order_by=F("salary").asc(),
)
).order_by("salary", "name")
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerysetEqual(
qs,
[
("Moore", "IT", 34000, 0.0833333333),
("Williams", "Accounting", 37000, 0.1666666667),
("Smith", "Marketing", 38000, 0.25),
("Johnson", "Marketing", 40000, 0.3333333333),
("Jenson", "Accounting", 45000, 0.5),
("Jones", "Accounting", 45000, 0.5),
("Adams", "Accounting", 50000, 0.5833333333),
("Brown", "Sales", 53000, 0.6666666667),
("Smith", "Sales", 55000, 0.75),
("Wilkinson", "IT", 60000, 0.8333333333),
("Johnson", "Management", 80000, 0.9166666667),
("Miller", "Management", 100000, 1),
],
lambda row: (
row.name,
row.department,
row.salary,
round(row.cume_dist, 10),
),
)
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue(expression="salary", nth=2),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by=F("department"),
)
).order_by("department", "hire_date", "name")
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", datetime.date(2005, 11, 1), 45000, None),
("Jenson", "Accounting", datetime.date(2008, 4, 1), 45000, 45000),
("Williams", "Accounting", datetime.date(2009, 6, 1), 37000, 45000),
("Adams", "Accounting", datetime.date(2013, 7, 1), 50000, 45000),
("Wilkinson", "IT", datetime.date(2011, 3, 1), 60000, None),
("Moore", "IT", datetime.date(2013, 8, 1), 34000, 34000),
("Miller", "Management", datetime.date(2005, 6, 1), 100000, None),
("Johnson", "Management", datetime.date(2005, 7, 1), 80000, 80000),
("Smith", "Marketing", datetime.date(2009, 10, 1), 38000, None),
("Johnson", "Marketing", datetime.date(2012, 3, 1), 40000, 40000),
("Smith", "Sales", datetime.date(2007, 6, 1), 55000, None),
("Brown", "Sales", datetime.date(2009, 9, 1), 53000, 53000),
],
lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.nth_value,
),
)
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL should be
returned.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
).order_by("department", F("hire_date").asc(), F("name").desc())
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
)
def test_lead_offset(self):
"""
Determine what the person hired two positions later in the same
department makes. None is returned when no such person exists.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead("salary", offset=2),
partition_by="department",
order_by=F("hire_date").asc(),
)
)
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 37000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 50000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), None),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), None),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), None),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), None),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
ordered=False,
)
@skipUnlessDBFeature("supports_default_in_lead_lag")
def test_lead_default(self):
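# offset=5 exceeds the size of every partition, so the default of
# 60000 is returned for every row.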
qs = Employee.objects.annotate(
lead_default=Window(
expression=Lead(expression="salary", offset=5, default=60000),
partition_by=F("department"),
order_by=F("department").asc(),
)
)
self.assertEqual(
list(qs.values_list("lead_default", flat=True).distinct()), [60000]
)
def test_ntile(self):
"""
Compute the quartile for each employee across the entire company,
ordered by salary from highest to lowest. Twelve employees divide
evenly into four groups of three.
"""
qs = Employee.objects.annotate(
ntile=Window(
expression=Ntile(num_buckets=4),
order_by="-salary",
)
).order_by("ntile", "-salary", "name")
self.assertQuerysetEqual(
qs,
[
("Miller", "Management", 100000, 1),
("Johnson", "Management", 80000, 1),
("Wilkinson", "IT", 60000, 1),
("Smith", "Sales", 55000, 2),
("Brown", "Sales", 53000, 2),
("Adams", "Accounting", 50000, 2),
("Jenson", "Accounting", 45000, 3),
("Jones", "Accounting", 45000, 3),
("Johnson", "Marketing", 40000, 3),
("Smith", "Marketing", 38000, 4),
("Williams", "Accounting", 37000, 4),
("Moore", "IT", 34000, 4),
],
lambda x: (x.name, x.department, x.salary, x.ntile),
)
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(
percent_rank=Window(
expression=PercentRank(),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("percent_rank")
# Round to account for precision differences among databases.
self.assertQuerysetEqual(
qs,
[
("Moore", "IT", 34000, 0.0),
("Williams", "Accounting", 37000, 0.0909090909),
("Smith", "Marketing", 38000, 0.1818181818),
("Johnson", "Marketing", 40000, 0.2727272727),
("Jenson", "Accounting", 45000, 0.3636363636),
("Jones", "Accounting", 45000, 0.4545454545),
("Adams", "Accounting", 50000, 0.5454545455),
("Brown", "Sales", 53000, 0.6363636364),
("Smith", "Sales", 55000, 0.7272727273),
("Wilkinson", "IT", 60000, 0.8181818182),
("Johnson", "Management", 80000, 0.9090909091),
("Miller", "Management", 100000, 1.0),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
round(row.percent_rank, 10),
),
)
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue("salary", nth=20), order_by=F("salary").asc()
)
)
self.assertEqual(
list(qs.values_list("nth_value", flat=True).distinct()), [None]
)
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(
max=Window(
expression=Max("salary"),
partition_by=[F("department"), F("hire_date__year")],
)
).order_by("department", "hire_date", "name")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.max,
),
)
def test_multiple_ordering(self):
"""
Accumulate the salaries over the departments based on hire_date.
If two people were hired on the same date in the same department, the
name in the ordering clause breaks the tie between them.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="department",
order_by=[F("hire_date").asc(), F("name").asc()],
)
).order_by("department", "sum")
self.assertQuerysetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 127000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 177000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 94000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 180000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 78000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 108000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
def test_related_ordering_with_count(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=["classification__code"],
)
)
self.assertEqual(qs.count(), 12)
def test_filter(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerysetEqual(
qs.filter(department_salary_rank=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerysetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerysetEqual(
qs.filter(department_salary_rank=1, department_avg_age_diff__gt=0),
["Miller"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerysetEqual(
qs.filter(Q(department_salary_rank=1) | Q(department_avg_age_diff__gt=0)),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_filter_conditional_annotation(self):
qs = (
Employee.objects.annotate(
rank=Window(Rank(), partition_by="department", order_by="-salary"),
case_first_rank=Case(
When(rank=1, then=True),
default=False,
),
q_first_rank=Q(rank=1),
)
.order_by("name")
.values_list("name", flat=True)
)
for annotation in ["case_first_rank", "q_first_rank"]:
with self.subTest(annotation=annotation):
self.assertSequenceEqual(
qs.filter(**{annotation: True}),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_filter_conditional_expression(self):
qs = (
Employee.objects.filter(
Exact(Window(Rank(), partition_by="department", order_by="-salary"), 1)
)
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_column_ref_rhs(self):
qs = (
Employee.objects.annotate(
max_dept_salary=Window(Max("salary"), partition_by="department")
)
.filter(max_dept_salary=F("salary"))
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_values(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("department", "name")
.values_list(Upper("name"), flat=True)
)
self.assertSequenceEqual(
qs.filter(department_salary_rank=1),
["ADAMS", "WILKINSON", "MILLER", "JOHNSON", "SMITH"],
)
def test_filter_alias(self):
qs = Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
self.assertQuerysetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
def test_filter_select_related(self):
qs = (
Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
)
.select_related("classification")
.filter(department_avg_age_diff__gt=0)
.order_by("department", "name")
)
self.assertQuerysetEqual(
qs,
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
with self.assertNumQueries(0):
qs[0].classification
def test_exclude(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerysetEqual(
qs.exclude(department_salary_rank__gt=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerysetEqual(
qs.exclude(department_avg_age_diff__lte=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerysetEqual(
qs.exclude(
Q(department_salary_rank__gt=1) | Q(department_avg_age_diff__lte=0)
),
["Miller"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerysetEqual(
qs.exclude(department_salary_rank__gt=1, department_avg_age_diff__lte=0),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_heterogeneous_filter(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("name")
.values_list("name", flat=True)
)
# Heterogeneous filter between window function and aggregates pushes
# the WHERE clause to the QUALIFY outer query.
self.assertSequenceEqual(
qs.filter(
department_salary_rank=1, department__in=["Accounting", "Management"]
),
["Adams", "Miller"],
)
self.assertSequenceEqual(
qs.filter(
Q(department_salary_rank=1)
| Q(department__in=["Accounting", "Management"])
),
[
"Adams",
"Jenson",
"Johnson",
"Johnson",
"Jones",
"Miller",
"Smith",
"Wilkinson",
"Williams",
],
)
# Heterogeneous filter between window function and aggregates pushes
# the HAVING clause to the QUALIFY outer query.
qs = qs.annotate(past_department_count=Count("past_departments"))
self.assertSequenceEqual(
qs.filter(department_salary_rank=1, past_department_count__gte=1),
["Johnson", "Miller"],
)
self.assertSequenceEqual(
qs.filter(Q(department_salary_rank=1) | Q(past_department_count__gte=1)),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_limited_filter(self):
"""
A query filtering against a window function has its limit applied
after the window filtering takes place.
"""
self.assertQuerysetEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.order_by("department")[0:3],
["Adams", "Wilkinson", "Miller"],
lambda employee: employee.name,
)
def test_filter_count(self):
self.assertEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.count(),
5,
)
@skipUnlessDBFeature("supports_frame_range_fixed_distance")
def test_range_n_preceding_and_following(self):
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=F("salary").asc(),
partition_by="department",
frame=ValueRange(start=-2, end=2),
)
)
self.assertIn("RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING", str(qs.query))
self.assertQuerysetEqual(
qs,
[
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 90000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 80000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
ordered=False,
)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="age",
order_by=[F("age").asc()],
frame=ValueRange(start=None, end=None),
)
).order_by("department", "hire_date", "name")
self.assertIn(
"RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", str(qs.query)
)
self.assertQuerysetEqual(
qs,
[
("Jones", "Accounting", 45000, datetime.date(2005, 11, 1), 165000),
("Jenson", "Accounting", 45000, datetime.date(2008, 4, 1), 165000),
("Williams", "Accounting", 37000, datetime.date(2009, 6, 1), 165000),
("Adams", "Accounting", 50000, datetime.date(2013, 7, 1), 130000),
("Wilkinson", "IT", 60000, datetime.date(2011, 3, 1), 194000),
("Moore", "IT", 34000, datetime.date(2013, 8, 1), 194000),
("Miller", "Management", 100000, datetime.date(2005, 6, 1), 194000),
("Johnson", "Management", 80000, datetime.date(2005, 7, 1), 130000),
("Smith", "Marketing", 38000, datetime.date(2009, 10, 1), 165000),
("Johnson", "Marketing", 40000, datetime.date(2012, 3, 1), 148000),
("Smith", "Sales", 55000, datetime.date(2007, 6, 1), 148000),
("Brown", "Sales", 53000, datetime.date(2009, 9, 1), 148000),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
row.hire_date,
row.sum,
),
)
def test_subquery_row_range_rank(self):
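# RowRange(start=-1, end=1) is a three-row moving window (previous,
# current, and next row); the subquery picks the hire date with the
# highest moving-average salary per department.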
qs = Employee.objects.annotate(
highest_avg_salary_date=Subquery(
Employee.objects.filter(
department=OuterRef("department"),
)
.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc()],
frame=RowRange(start=-1, end=1),
),
)
.order_by("-avg_salary", "hire_date")
.values("hire_date")[:1],
),
).order_by("department", "name")
self.assertQuerysetEqual(
qs,
[
("Adams", "Accounting", datetime.date(2005, 11, 1)),
("Jenson", "Accounting", datetime.date(2005, 11, 1)),
("Jones", "Accounting", datetime.date(2005, 11, 1)),
("Williams", "Accounting", datetime.date(2005, 11, 1)),
("Moore", "IT", datetime.date(2011, 3, 1)),
("Wilkinson", "IT", datetime.date(2011, 3, 1)),
("Johnson", "Management", datetime.date(2005, 6, 1)),
("Miller", "Management", datetime.date(2005, 6, 1)),
("Johnson", "Marketing", datetime.date(2009, 10, 1)),
("Smith", "Marketing", datetime.date(2009, 10, 1)),
("Brown", "Sales", datetime.date(2007, 6, 1)),
("Smith", "Sales", datetime.date(2007, 6, 1)),
],
transform=lambda row: (
row.name,
row.department,
row.highest_avg_salary_date,
),
)
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum covers all previous rows, the current row, and the
next three rows (if they exist), according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=None, end=3),
)
).order_by("sum", "hire_date")
self.assertIn("ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING", str(qs.query))
self.assertQuerysetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 280000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 325000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 362000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 415000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 453000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 513000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 553000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 603000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 637000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 637000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 637000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 637000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_window_function(self):
"""
Window functions are not aggregates, so the query may need distinct()
to filter out duplicate rows.
"""
qs = (
Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by=ExtractYear("hire_date"),
order_by=ExtractYear("hire_date"),
),
year=ExtractYear("hire_date"),
)
.filter(sum__gte=45000)
.values("year", "sum")
.distinct("year")
.order_by("year")
)
results = [
{"year": 2005, "sum": 225000},
{"year": 2007, "sum": 55000},
{"year": 2008, "sum": 45000},
{"year": 2009, "sum": 128000},
{"year": 2011, "sum": 60000},
{"year": 2013, "sum": 84000},
]
for idx, val in enumerate(results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Max(Col(expressions_window_employee, expressions_window.Employee.salary)) "
"OVER (PARTITION BY Col(expressions_window_employee, "
"expressions_window.Employee.department))>)."
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department="Management").update(
salary=Window(expression=Max("salary"), partition_by="department"),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Sum(Value(10000), order_by=OrderBy(F(pk), descending=False)) OVER ()"
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name="Jameson",
department="Management",
hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F("pk").asc())),
)
def test_window_expression_within_subquery(self):
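# FirstValue("id") ordered by descending salary yields the top-paid
# employee's id per department; the outer query keeps only those rows.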
subquery_qs = Employee.objects.annotate(
highest=Window(
FirstValue("id"),
partition_by=F("department"),
order_by=F("salary").desc(),
)
).values("highest")
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertCountEqual(
highest_salary.values("department", "salary"),
[
{"department": "Accounting", "salary": 50000},
{"department": "Sales", "salary": 55000},
{"department": "Marketing", "salary": 40000},
{"department": "IT", "salary": 60000},
{"department": "Management", "salary": 100000},
],
)
@skipUnlessDBFeature("supports_json_field")
def test_key_transform(self):
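# Window partition_by/order_by accept JSON key transforms; salaries are
# extracted as text and cast to integers before summing.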
Detail.objects.bulk_create(
[
Detail(value={"department": "IT", "name": "Smith", "salary": 37000}),
Detail(value={"department": "IT", "name": "Nowak", "salary": 32000}),
Detail(value={"department": "HR", "name": "Brown", "salary": 50000}),
Detail(value={"department": "HR", "name": "Smith", "salary": 55000}),
Detail(value={"department": "PR", "name": "Moore", "salary": 90000}),
]
)
tests = [
(KeyTransform("department", "value"), KeyTransform("name", "value")),
(F("value__department"), F("value__name")),
]
for partition_by, order_by in tests:
with self.subTest(partition_by=partition_by, order_by=order_by):
qs = Detail.objects.annotate(
department_sum=Window(
expression=Sum(
Cast(
KeyTextTransform("salary", "value"),
output_field=IntegerField(),
)
),
partition_by=[partition_by],
order_by=[order_by],
)
).order_by("value__department", "department_sum")
self.assertQuerysetEqual(
qs,
[
("Brown", "HR", 50000, 50000),
("Smith", "HR", 55000, 105000),
("Nowak", "IT", 32000, 32000),
("Smith", "IT", 37000, 69000),
("Moore", "PR", 90000, 90000),
],
lambda entry: (
entry.value["name"],
entry.value["department"],
entry.value["salary"],
entry.department_sum,
),
)
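    # Both parametrizations above are equivalent spellings of the same
    # window; a minimal sketch of the second form:
    #
    #   Detail.objects.annotate(
    #       department_sum=Window(
    #           Sum(Cast(KeyTextTransform("salary", "value"),
    #                    output_field=IntegerField())),
    #           partition_by=F("value__department"),
    #       )
    #   )
    #
    # The Cast() is needed because JSON key lookups return text on most
    # backends.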
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=3),
)
)
)
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=-3),
)
)
)
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end="a"),
)
)
)
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=ValueRange(start="a"),
)
)
)
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=RowRange(end="a"),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_start(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=-1),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_end(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=1),
)
)
)
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=RowRange(start="a"),
)
)
)
def test_invalid_filter(self):
msg = (
"Heterogeneous disjunctive predicates against window functions are not "
"implemented when performing conditional aggregation."
)
qs = Employee.objects.annotate(
window=Window(Rank()),
past_dept_cnt=Count("past_departments"),
)
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.filter(Q(window=1) | Q(department="Accounting")))
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.exclude(window=1, department="Accounting"))
class WindowUnsupportedTests(TestCase):
def test_unsupported_backend(self):
msg = "This backend does not support window expressions."
with mock.patch.object(connection.features, "supports_over_clause", False):
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(
dense_rank=Window(expression=DenseRank())
).get()
class NonQueryWindowTests(SimpleTestCase):
def test_window_repr(self):
self.assertEqual(
repr(Window(expression=Sum("salary"), partition_by="department")),
"<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>",
)
self.assertEqual(
repr(Window(expression=Avg("salary"), order_by=F("department").asc())),
"<Window: Avg(F(salary)) OVER (OrderByList(OrderBy(F(department), "
"descending=False)))>",
)
def test_window_frame_repr(self):
self.assertEqual(
repr(RowRange(start=-1)),
"<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=None, end=1)),
"<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=0, end=0)),
"<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>",
)
self.assertEqual(
repr(RowRange(start=0, end=0)),
"<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>",
)
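    # Frame boundary semantics behind the reprs above: a negative start means
    # N PRECEDING, a positive end means N FOLLOWING, 0 means CURRENT ROW, and
    # None means UNBOUNDED. For example, a running three-row window:
    #
    #   Window(Sum("salary"), order_by=F("hire_date").asc(),
    #          frame=RowRange(start=-2, end=0))
    #
    # renders as ROWS BETWEEN 2 PRECEDING AND CURRENT ROW.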
def test_empty_group_by_cols(self):
window = Window(expression=Sum("pk"))
self.assertEqual(window.get_group_by_cols(), [])
self.assertFalse(window.contains_aggregate)
def test_frame_empty_group_by_cols(self):
frame = WindowFrame()
self.assertEqual(frame.get_group_by_cols(), [])
def test_frame_window_frame_notimplemented(self):
frame = WindowFrame()
msg = "Subclasses must implement window_frame_start_end()."
with self.assertRaisesMessage(NotImplementedError, msg):
frame.window_frame_start_end(None, None, None)
def test_invalid_order_by(self):
msg = (
"Window.order_by must be either a string reference to a field, an "
"expression, or a list or tuple of them."
)
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Sum("power"), order_by={"-horse"})
def test_invalid_source_expression(self):
msg = "Expression 'Upper' isn't compatible with OVER clauses."
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Upper("name"))
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import NotSupportedError, connection
from django.db.models import Prefetch, QuerySet, prefetch_related_objects
from django.db.models.query import get_prefetcher
from django.db.models.sql import Query
from django.test import (
TestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
Article,
Author,
Author2,
AuthorAddress,
AuthorWithAge,
Bio,
Book,
Bookmark,
BookReview,
BookWithYear,
Comment,
Department,
Employee,
FavoriteAuthors,
House,
LessonEntry,
ModelIterableSubclass,
Person,
Qualification,
Reader,
Room,
TaggedItem,
Teacher,
WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title="Poems")
cls.book2 = Book.objects.create(title="Jane Eyre")
cls.book3 = Book.objects.create(title="Wuthering Heights")
cls.book4 = Book.objects.create(title="Sense and Sensibility")
cls.author1 = Author.objects.create(name="Charlotte", first_book=cls.book1)
cls.author2 = Author.objects.create(name="Anne", first_book=cls.book1)
cls.author3 = Author.objects.create(name="Emily", first_book=cls.book1)
cls.author4 = Author.objects.create(name="Jane", first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name="Amy")
cls.reader2 = Reader.objects.create(name="Belinda")
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index("WHERE")
self.assertEqual(
sql.count(str(needle), where_idx),
1,
msg="WHERE clause doesn't contain %s, actual SQL: %s"
% (needle, sql[where_idx:]),
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [
list(b.authors.all()) for b in Book.objects.prefetch_related("authors")
]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [
list(a.books.all()) for a in Author.objects.prefetch_related("books")
]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [
a.first_book for a in Author.objects.prefetch_related("first_book")
]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[
list(b.first_time_authors.all())
for b in Book.objects.prefetch_related("first_time_authors")
]
self.assertSequenceEqual(self.book2.authors.all(), [self.author1])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related("bookwithyear").all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create(
[
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
]
)
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related("bio")
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[
list(b.first_time_authors.all())
for b in Book.objects.prefetch_related("first_time_authors").exclude(
id=1000
)
]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related("first_time_authors")
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related("first_time_authors")
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related("books")
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books__read_by")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
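    # Query-count sketch for the assertion above: each relation level in
    # "books__read_by" costs one query on top of the base query, so
    #
    #   Author.objects.prefetch_related("books__read_by")
    #
    # issues 3 queries total (authors, their books, those books' readers),
    # regardless of how many rows each step returns.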
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books", "books__read_by")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("books__read_by", "books")
lists = [
[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs
]
self.assertEqual(
lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],  # Jane - Sense and Sensibility
],
)
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
        # A double (two-level) lookup is needed to exercise prefetching on
        # .get().
with self.assertNumQueries(3):
author = Author.objects.prefetch_related("books__read_by").get(
name="Charlotte"
)
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related("first_book").prefetch_related(
"first_book__read_by"
)
lists = [[str(r) for r in a.first_book.read_by.all()] for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related("bio__books").select_related("bio")
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.prefetch_related("books_read__xyz")
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn("prefetch_related", str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related("authors__name")
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn("prefetch_related", str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_prefetch_eq(self):
prefetch_1 = Prefetch("authors", queryset=Author.objects.all())
prefetch_2 = Prefetch("books", queryset=Book.objects.all())
self.assertEqual(prefetch_1, prefetch_1)
self.assertEqual(prefetch_1, mock.ANY)
self.assertNotEqual(prefetch_1, prefetch_2)
def test_forward_m2m_to_attr_conflict(self):
msg = "to_attr=authors conflicts with a field on the Book model."
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(
Book.objects.prefetch_related(
Prefetch("authors", queryset=authors, to_attr="authors"),
)
)
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = "to_attr=books conflicts with a field on the Author model."
poems = Book.objects.filter(title="Poems")
with self.assertRaisesMessage(ValueError, msg):
list(
Author.objects.prefetch_related(
Prefetch("books", queryset=poems, to_attr="books"),
)
)
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
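    # A to_attr sketch: both conflicts above are avoided by choosing a name
    # that doesn't shadow a model field ("author_list" is illustrative):
    #
    #   Book.objects.prefetch_related(
    #       Prefetch("authors", queryset=Author.objects.all(),
    #                to_attr="author_list")
    #   )
    #
    # The prefetched rows are then stored as a plain list on
    # book.author_list.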
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__addresses"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__favorite_authors"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related("authors__authorwithage"))
sql = queries[-1]["sql"]
self.assertWhereContains(sql, self.author1.id)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred on m2m and
reverse m2o relations until necessary.
"""
add_q = Query.add_q
for relation in ["authors", "first_time_authors"]:
with self.subTest(relation=relation):
with mock.patch.object(
Query,
"add_q",
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(Book.objects.prefetch_related(relation))
self.assertEqual(add_q_mock.call_count, 1)
def test_named_values_list(self):
qs = Author.objects.prefetch_related("books")
self.assertCountEqual(
[value.name for value in qs.values_list("name", named=True)],
["Anne", "Charlotte", "Emily", "Jane"],
)
def test_m2m_prefetching_iterator_with_chunks(self):
with self.assertNumQueries(3):
authors = [
b.authors.first()
for b in Book.objects.prefetch_related("authors").iterator(chunk_size=2)
]
self.assertEqual(
authors,
[self.author1, self.author1, self.author3, self.author4],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_m2m_prefetching_iterator_without_chunks(self):
# prefetch_related() is ignored.
with self.assertNumQueries(5):
authors = [
b.authors.first()
for b in Book.objects.prefetch_related("authors").iterator()
]
self.assertEqual(
authors,
[self.author1, self.author1, self.author3, self.author4],
)
def test_m2m_prefetching_iterator_without_chunks_warning(self):
msg = (
"Using QuerySet.iterator() after prefetch_related() without "
"specifying chunk_size is deprecated."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Book.objects.prefetch_related("authors").iterator()
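    # A sketch of the supported pattern: pass chunk_size so prefetching is
    # performed per chunk instead of being skipped (and, per the deprecation
    # above, required after Django 5.0):
    #
    #   for book in Book.objects.prefetch_related("authors").iterator(
    #       chunk_size=100
    #   ):
    #       book.authors.all()  # served from the per-chunk prefetch cache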
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,)
).prefetch_related("authors")
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(
book1.authors.all(), [self.author1, self.author2, self.author3]
)
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related("authors").raw(
"SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(
book1.authors.all(), [self.author1, self.author2, self.author3]
)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related("books")
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that returns a list of (object, related objects) pairs
        for the objects in obj_iter. For each object, the given path is
        traversed recursively and the objects found along the way are
        collected into the second element of the pair.
"""
ret_val = []
if hasattr(obj_iter, "all"):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
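    # Usage sketch: for a person with two houses,
    # traverse_qs([person], [["houses"]]) returns
    # [(person, [(house1, []), (house2, [])])].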
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name="Joe")
cls.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(
name="House 1", address="123 Main St", owner=cls.person1
)
cls.room1_1 = Room.objects.create(name="Dining room", house=cls.house1)
cls.room1_2 = Room.objects.create(name="Lounge", house=cls.house1)
cls.room1_3 = Room.objects.create(name="Kitchen", house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(
name="House 2", address="45 Side St", owner=cls.person1
)
cls.room2_1 = Room.objects.create(name="Dining room", house=cls.house2)
cls.room2_2 = Room.objects.create(name="Lounge", house=cls.house2)
cls.room2_3 = Room.objects.create(name="Kitchen", house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(
name="House 3", address="6 Downing St", owner=cls.person2
)
cls.room3_1 = Room.objects.create(name="Dining room", house=cls.house3)
cls.room3_2 = Room.objects.create(name="Lounge", house=cls.house3)
cls.room3_3 = Room.objects.create(name="Kitchen", house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(
name="house 4", address="7 Regents St", owner=cls.person2
)
cls.room4_1 = Room.objects.create(name="Dining room", house=cls.house4)
cls.room4_2 = Room.objects.create(name="Lounge", house=cls.house4)
cls.room4_3 = Room.objects.create(name="Kitchen", house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related("houses")
related_objs_normal = ([list(p.houses.all()) for p in qs],)
related_objs_from_traverse = [
[inner[0] for inner in o[1]] for o in self.traverse_qs(qs, [["houses"]])
]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all()),
),
[["houses", "rooms"]],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing
# houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses_lst__rooms",
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
),
),
[["houses", "rooms"]],
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related("houses__rooms", "houses"),
[["houses", "rooms"]],
)
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all(), to_attr="houses_lst"),
),
[["houses", "rooms"]],
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses"), [["houses"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses")), [["houses"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst")
),
[["houses_lst"]],
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related("occupants"), [["occupants"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch("occupants")), [["occupants"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(
Prefetch("occupants", to_attr="occupants_lst")
),
[["occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related("house__occupants"),
[["house", "occupants"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch("house__occupants")),
[["house", "occupants"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(
Prefetch("house__occupants", to_attr="occupants_lst")
),
[["house", "occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag="houses").prefetch_related(
"content_object__rooms"
),
[["content_object", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch("content_object"),
Prefetch("content_object__rooms", to_attr="rooms_lst"),
),
[["content_object", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses", "houses__rooms"),
[["houses", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses"), "houses__rooms"),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses"), Prefetch("houses__rooms")
),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"), "houses_lst__rooms"
),
[["houses_lst", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"),
Prefetch("houses_lst__rooms", to_attr="rooms_lst"),
),
[["houses_lst", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(
content_object=bookmark, favorite=bookmark, tag="python"
)
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related(
"tags", "tags__content_object", "favorite_tags"
),
[["tags", "content_object"], ["favorite_tags"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch("tags", to_attr="tags_lst"),
Prefetch("tags_lst__content_object"),
Prefetch("favorite_tags"),
),
[["tags_lst", "content_object"], ["favorite_tags"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
"primary_house__occupants__houses",
),
[["primary_house", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("primary_house__occupants", to_attr="occupants_lst"),
"primary_house__occupants_lst__houses",
),
[["primary_house", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
"all_houses__occupants__houses",
),
[["all_houses", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
Prefetch("all_houses__occupants", to_attr="occupants_lst"),
"all_houses__occupants_lst__houses",
),
[["all_houses", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related("houses"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses"]]),
self.traverse_qs(lst2, [["houses_lst"]]),
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=House.objects.filter(
pk__in=[self.house1.pk, self.house3.pk]
),
to_attr="houses_lst",
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__rooms"))
with self.assertNumQueries(3):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses__rooms",
queryset=Room.objects.all(),
to_attr="rooms_lst",
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "rooms"]]),
self.traverse_qs(lst2, [["houses", "rooms_lst"]]),
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__owner"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.select_related("owner"))
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "owner"]]),
self.traverse_qs(lst2, [["houses", "owner"]]),
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch("rooms", queryset=inner_rooms_qs, to_attr="rooms_lst")
)
with self.assertNumQueries(4):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=houses_qs_prf.filter(pk=self.house1.pk),
to_attr="houses_lst",
),
Prefetch("houses_lst__rooms_lst__main_room_of"),
)
)
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("house")
lst1 = self.traverse_qs(rooms, [["house", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(Prefetch("house", queryset=houses))
lst2 = self.traverse_qs(rooms, [["house", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related("owner")
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=houses, to_attr="house_attr")
)
lst2 = self.traverse_qs(rooms, [["house_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = Room.objects.prefetch_related(
Prefetch("house", queryset=houses.filter(address="DoesNotExist"))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "house")
room = Room.objects.prefetch_related(
Prefetch(
"house",
queryset=houses.filter(address="DoesNotExist"),
to_attr="house_attr",
)
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=House.objects.only("name"))
)
with self.assertNumQueries(2):
getattr(rooms.first().house, "name")
with self.assertNumQueries(3):
getattr(rooms.first().house, "address")
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("main_room_of")
lst1 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(
Prefetch("main_room_of", queryset=houses)
)
lst2 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses,
to_attr="main_room_of_attr",
)
)
)
lst2 = self.traverse_qs(rooms, [["main_room_of_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch("main_room_of", queryset=houses.filter(address="DoesNotExist"))
)
.first()
)
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "main_room_of")
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses.filter(address="DoesNotExist"),
to_attr="main_room_of_attr",
)
)
.first()
)
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.filter(name="House 1")),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch("rooms"))
persons = Person.objects.prefetch_related(Prefetch("houses", queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch("occupants", queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1,
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch("houses", to_attr="some_attr_name"),
Prefetch("houses", queryset=House.objects.prefetch_related("main_room")),
)
houses = House.objects.prefetch_related(
Prefetch("occupants", queryset=occupants)
)
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [["occupants", "houses", "main_room"]])
def test_values_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values("pk"))
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values_list("pk"))
        # That error doesn't affect managers with custom ModelIterable
        # subclasses.
self.assertIs(
Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass
)
Prefetch("teachers", Teacher.objects_custom.all())
def test_raw_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.raw("select pk from house"))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch("rooms", queryset=Room.objects.all(), to_attr="to_rooms"),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch("houses", House.objects.all(), to_attr="cached_all_houses"),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
"add_q",
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(
House.objects.prefetch_related(
Prefetch("occupants", queryset=Person.objects.all())
)
)
self.assertEqual(add_q_mock.call_count, 1)
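    # The add_q mock above demonstrates lazy filtering: the Prefetch
    # queryset's filters are merged once into the single related-object
    # query, roughly (table names illustrative):
    #
    #   SELECT ... FROM person
    #   INNER JOIN house_occupants ON ...
    #   WHERE house_id IN (<prefetched house pks>)
    #
    # so add_q runs once rather than once per outer row.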
class DefaultManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.qual1 = Qualification.objects.create(name="BA")
cls.qual2 = Qualification.objects.create(name="BSci")
cls.qual3 = Qualification.objects.create(name="MA")
cls.qual4 = Qualification.objects.create(name="PhD")
cls.teacher1 = Teacher.objects.create(name="Mr Cleese")
cls.teacher2 = Teacher.objects.create(name="Mr Idle")
cls.teacher3 = Teacher.objects.create(name="Mr Chapman")
cls.teacher1.qualifications.add(cls.qual1, cls.qual2, cls.qual3, cls.qual4)
cls.teacher2.qualifications.add(cls.qual1)
cls.teacher3.qualifications.add(cls.qual2)
cls.dept1 = Department.objects.create(name="English")
cls.dept2 = Department.objects.create(name="Physics")
cls.dept1.teachers.add(cls.teacher1, cls.teacher2)
cls.dept2.teachers.add(cls.teacher1, cls.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related("teachers")
depts = "".join(
"%s department: %s\n"
% (dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs
)
self.assertEqual(
depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman "
"(BSci)\n",
)
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related("content_object")
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related("content_object")
[c.content_object for c in qs]
def test_prefetch_GFK_uuid_pk(self):
article = Article.objects.create(name="Django")
Comment.objects.create(comment="awesome", content_object_uuid=article)
qs = Comment.objects.prefetch_related("content_object_uuid")
self.assertEqual([c.content_object_uuid for c in qs], [article])
def test_prefetch_GFK_fk_pk(self):
book = Book.objects.create(title="Poems")
book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
Comment.objects.create(comment="awesome", content_object=book_with_year)
qs = Comment.objects.prefetch_related("content_object")
self.assertEqual([c.content_object for c in qs], [book_with_year])
def test_traverse_GFK(self):
"""
        A 'content_object' can be traversed with prefetch_related() to reach
        related objects on the other side (assuming the queryset is suitably
        filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(
content_type=ct, tag="awesome"
).prefetch_related("content_object__read_by")
readers_of_awesome_books = {
r.name for tag in qs for r in tag.content_object.read_by.all()
}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
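    # Traversing past a GFK only works when the prefetched content objects
    # all expose the next relation (here, filtering to Book rows guarantees
    # "read_by" exists); an unfiltered mix of Books and Readers would fail
    # for the rows lacking that attribute.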
def test_nullable_GFK(self):
TaggedItem.objects.create(
tag="awesome", content_object=self.book1, created_by=self.reader1
)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [
t.created_by for t in TaggedItem.objects.prefetch_related("created_by")
]
self.assertEqual(result, [t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(content_object=bookmark, tag="python")
with self.assertNumQueries(2):
tags = [
t.tag
for b in Bookmark.objects.prefetch_related("tags")
for t in b.tags.all()
]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=b, tag="django")
TaggedItem.objects.create(content_object=b, favorite=b, tag="python")
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related(
"tags", "favorite_tags"
)[0]
self.assertEqual(
sorted(i.tag for i in bookmark.tags.all()), ["django", "python"]
)
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
django_tag = TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(content_object=bookmark, tag="python")
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch("tags", TaggedItem.objects.filter(tag="django")),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
def test_deleted_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
ct = ContentType.objects.get_for_model(Book)
book1_pk = self.book1.pk
self.book1.delete()
with self.assertNumQueries(2):
qs = TaggedItem.objects.filter(tag="awesome").prefetch_related(
"content_object"
)
result = [
(tag.object_id, tag.content_type_id, tag.content_object) for tag in qs
]
self.assertEqual(
result,
[
(book1_pk, ct.pk, None),
(self.book2.pk, ct.pk, self.book2),
],
)
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title="Poems", published_year=2010)
cls.book2 = BookWithYear.objects.create(title="More poems", published_year=2011)
cls.author1 = AuthorWithAge.objects.create(
name="Jane", first_book=cls.book1, age=50
)
cls.author2 = AuthorWithAge.objects.create(
name="Tom", first_book=cls.book1, age=49
)
cls.author3 = AuthorWithAge.objects.create(
name="Robert", first_book=cls.book2, age=48
)
cls.author_address = AuthorAddress.objects.create(
author=cls.author1, address="SomeStreet 1"
)
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes="review book1")
cls.br2 = BookReview.objects.create(book=cls.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related("addresses")
addresses = [
[str(address) for address in obj.addresses.all()] for obj in qs
]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related("book")
titles = [obj.book.title for obj in qs]
self.assertCountEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related("books_with_year")
with self.assertNumQueries(2):
lst = [
[str(book) for book in author.books_with_year.all()] for author in qs
]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related("aged_authors")
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related("author")]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [
a.authorwithage
for a in Author.objects.prefetch_related("authorwithage")
]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn("authorwithage", connection.queries[-1]["sql"].lower())
self.assertIn(" IN ", connection.queries[-1]["sql"])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title="Poems")
cls.author1 = Author.objects.create(name="Jane", first_book=cls.book)
cls.author2 = Author.objects.create(name="Tom", first_book=cls.book)
cls.author3 = Author.objects.create(name="Robert", first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(
author=cls.author1, address="SomeStreet 1"
)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related("addresses")
addresses = [
[str(address) for address in obj.addresses.all()] for obj in qs
]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related("favorite_authors", "favors_me")
favorites = [
(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()],
)
for author in qs
]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)]),
],
)
def test_m2m_manager_reused(self):
author = Author.objects.prefetch_related(
"favorite_authors",
"favors_me",
).first()
self.assertIs(author.favorite_authors, author.favorite_authors)
self.assertIs(author.favors_me, author.favors_me)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
@classmethod
def setUpTestData(cls):
person1 = Person.objects.create(name="Joe")
person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house1 = House.objects.create(address="123 Main St")
room1_1 = Room.objects.create(name="Dining room", house=house1)
Room.objects.create(name="Lounge", house=house1)
Room.objects.create(name="Kitchen", house=house1)
house1.main_room = room1_1
house1.save()
person1.houses.add(house1)
house2 = House.objects.create(address="45 Side St")
room2_1 = Room.objects.create(name="Dining room", house=house2)
Room.objects.create(name="Lounge", house=house2)
house2.main_room = room2_1
house2.save()
person1.houses.add(house2)
house3 = House.objects.create(address="6 Downing St")
room3_1 = Room.objects.create(name="Dining room", house=house3)
Room.objects.create(name="Lounge", house=house3)
Room.objects.create(name="Kitchen", house=house3)
house3.main_room = room3_1
house3.save()
person2.houses.add(house3)
house4 = House.objects.create(address="7 Regents St")
room4_1 = Room.objects.create(name="Dining room", house=house4)
Room.objects.create(name="Lounge", house=house4)
house4.main_room = room4_1
house4.save()
person2.houses.add(house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related(
"houses__rooms", "primary_house__occupants"
)
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
        # Because select_related() is used for 'boss', it doesn't need to be
        # prefetched, but it can still be traversed even though some values
        # are null.
with self.assertNumQueries(2):
qs = Employee.objects.select_related("boss").prefetch_related("boss__serfs")
co_serfs = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs
]
qs2 = Employee.objects.select_related("boss")
co_serfs2 = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2
]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related("boss__serfs")
co_serfs = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs
]
qs2 = Employee.objects.all()
co_serfs2 = [
list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2
]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() prefetches objects correctly because it doesn't use
        .iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related("serfs").in_bulk(
[boss1.pk, boss2.pk]
)
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
databases = {"default", "other"}
def test_using_is_honored_m2m(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related("authors")
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1
)
self.assertEqual(
books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n",
)
# Reverse
qs2 = A.prefetch_related("books")
with self.assertNumQueries(2, using="other"):
authors = "".join(
"%s: %s\n"
% (author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2
)
self.assertEqual(
authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n",
)
def test_using_is_honored_fkey(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using="other"):
books = ", ".join(
a.first_book.title for a in A.prefetch_related("first_book")
)
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related("first_time_authors")
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using("other")
A = AuthorWithAge.objects.using("other")
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name="Jane", first_book=book1, age=50)
A.create(name="Tom", first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using="other"):
authors = ", ".join(a.author.name for a in A.prefetch_related("author"))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using="other"):
ages = ", ".join(
str(a.authorwithage.age) for a in A.prefetch_related("authorwithage")
)
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch("first_time_authors", queryset=Author.objects.all())
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on the same db.
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("other")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on a different db.
with self.assertNumQueries(1, using="default"), self.assertNumQueries(
1, using="other"
):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("default")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
        self.assertEqual(books, "Poems ()\nSense and Sensibility ()\n")
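    # Routing sketch: a Prefetch queryset without an explicit using() is
    # re-routed to the outer queryset's database, so the implicit-hinting
    # case above runs both queries on "other", while
    #
    #   Prefetch("first_time_authors",
    #            queryset=Author.objects.using("default"))
    #
    # pins the inner query to "default" (where no authors exist, hence the
    # empty parentheses asserted above).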
class Ticket19607Tests(TestCase):
@classmethod
def setUpTestData(cls):
LessonEntry.objects.bulk_create(
LessonEntry(id=id_, name1=name1, name2=name2)
for id_, name1, name2 in [
(1, "einfach", "simple"),
(2, "schwierig", "difficult"),
]
)
WordEntry.objects.bulk_create(
WordEntry(id=id_, lesson_entry_id=lesson_entry_id, name=name)
for id_, lesson_entry_id, name in [
(1, 1, "einfach"),
(2, 1, "simple"),
(3, 2, "schwierig"),
(4, 2, "difficult"),
]
)
def test_bug(self):
list(
WordEntry.objects.prefetch_related(
"lesson_entry", "lesson_entry__wordentry_set"
)
)
class Ticket21410Tests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Poems")
book2 = Book.objects.create(title="Jane Eyre")
book3 = Book.objects.create(title="Wuthering Heights")
book4 = Book.objects.create(title="Sense and Sensibility")
author1 = Author2.objects.create(name="Charlotte", first_book=book1)
author2 = Author2.objects.create(name="Anne", first_book=book1)
author3 = Author2.objects.create(name="Emily", first_book=book1)
author4 = Author2.objects.create(name="Jane", first_book=book4)
author1.favorite_books.add(book1, book2, book3)
author2.favorite_books.add(book1)
author3.favorite_books.add(book2)
author4.favorite_books.add(book3)
def test_bug(self):
list(Author2.objects.prefetch_related("first_book", "favorite_books"))
class Ticket21760Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
cls.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = cls.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], "house", "house")[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(" JOIN ", str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched [1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title="book1"),
Book.objects.create(title="book2"),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name="Author11"),
Author.objects.create(first_book=cls.book1, name="Author12"),
Author.objects.create(first_book=cls.book2, name="Author21"),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address="Happy place"),
AuthorAddress.objects.create(author=cls.author12, address="Haunted house"),
AuthorAddress.objects.create(author=cls.author21, address="Happy place"),
]
cls.bookwithyear1 = BookWithYear.objects.create(
title="Poems", published_year=2010
)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"]).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(
book1.first_time_authors.all(), [self.author11, self.author12]
)
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(
book1.first_time_authors.all()[0].addresses.all(),
[self.author1_address1],
)
self.assertSequenceEqual(
book1.first_time_authors.all()[1].addresses.all(), []
)
self.assertSequenceEqual(
book2.first_time_authors.all()[0].addresses.all(),
[self.author2_address1],
)
self.assertEqual(
list(book1.first_time_authors.all()),
list(book1.first_time_authors.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()),
list(book2.first_time_authors.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all()),
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all()),
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all()),
)
def test_detect_is_fetched_with_to_attr(self):
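# With to_attr, prefetched results are stored as plain list attributes
# (first_authors, happy_place) instead of in the related managers' caches.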
with self.assertNumQueries(3):
books = Book.objects.filter(title__in=["book1", "book2"]).prefetch_related(
Prefetch(
"first_time_authors",
Author.objects.prefetch_related(
Prefetch(
"addresses",
AuthorAddress.objects.filter(address="Happy place"),
to_attr="happy_place",
)
),
to_attr="first_authors",
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(
book1.first_authors[0].happy_place, [self.author1_address1]
)
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(
book2.first_authors[0].happy_place, [self.author2_address1]
)
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
(bookwithyear1,) = BookWithYear.objects.prefetch_related("bookreview_set")
with self.assertNumQueries(0):
self.assertCountEqual(
bookwithyear1.bookreview_set.all(), [self.bookreview1]
)
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], "bookreview_set")
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(
bookwithyear.bookreview_set.all(), [self.bookreview1, new_review]
)
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], "bookreview_set")
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title="Les confessions Volume I")
cls.book2 = Book.objects.create(title="Candide")
cls.author1 = AuthorWithAge.objects.create(
name="Rousseau", first_book=cls.book1, age=70
)
cls.author2 = AuthorWithAge.objects.create(
name="Voltaire", first_book=cls.book2, age=65
)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
"author",
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch("favorite_authors__first_book"),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertSequenceEqual(authors, [self.author1, self.author2])
class NestedPrefetchTests(TestCase):
@classmethod
def setUpTestData(cls):
house = House.objects.create(name="Big house", address="123 Main St")
cls.room = Room.objects.create(name="Kitchen", house=house)
def test_nested_prefetch_is_not_overwritten_by_related_object(self):
"""
The prefetched relationship is used rather than populating the reverse
relationship from the parent, when prefetching a set of child objects
related to a set of parent objects and the child queryset itself
specifies a prefetch back to the parent.
"""
queryset = House.objects.only("name").prefetch_related(
Prefetch(
"rooms",
queryset=Room.objects.prefetch_related(
Prefetch("house", queryset=House.objects.only("address")),
),
),
)
with self.assertNumQueries(3):
house = queryset.first()
self.assertIs(Room.house.is_cached(self.room), True)
with self.assertNumQueries(0):
house.rooms.first().house.address
class PrefetchLimitTests(TestDataMixin, TestCase):
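# Prefetching from a sliced queryset (e.g. authors[1:]) is implemented with
# window functions, hence the supports_over_clause feature guards below.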
@skipUnlessDBFeature("supports_over_clause")
def test_m2m_forward(self):
authors = Author.objects.all() # Meta.ordering
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch("authors", authors),
Prefetch("authors", authors[1:], to_attr="authors_sliced"),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(book.authors_sliced, list(book.authors.all())[1:])
@skipUnlessDBFeature("supports_over_clause")
def test_m2m_reverse(self):
books = Book.objects.order_by("title")
with self.assertNumQueries(3):
authors = list(
Author.objects.prefetch_related(
Prefetch("books", books),
Prefetch("books", books[1:2], to_attr="books_sliced"),
)
)
for author in authors:
with self.subTest(author=author):
self.assertEqual(author.books_sliced, list(author.books.all())[1:2])
@skipUnlessDBFeature("supports_over_clause")
def test_foreignkey_reverse(self):
authors = Author.objects.order_by("-name")
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch(
"first_time_authors",
authors,
),
Prefetch(
"first_time_authors",
authors[1:],
to_attr="first_time_authors_sliced",
),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(
book.first_time_authors_sliced,
list(book.first_time_authors.all())[1:],
)
@skipUnlessDBFeature("supports_over_clause")
def test_reverse_ordering(self):
authors = Author.objects.reverse() # Reverse Meta.ordering
with self.assertNumQueries(3):
books = list(
Book.objects.prefetch_related(
Prefetch("authors", authors),
Prefetch("authors", authors[1:], to_attr="authors_sliced"),
)
)
for book in books:
with self.subTest(book=book):
self.assertEqual(book.authors_sliced, list(book.authors.all())[1:])
@skipIfDBFeature("supports_over_clause")
def test_window_not_supported(self):
authors = Author.objects.all()
msg = (
"Prefetching from a limited queryset is only supported on backends that "
"support window functions."
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(Book.objects.prefetch_related(Prefetch("authors", authors[1:])))
|
0e7ea26f7a99165aebc2c57091a116b294388a459d3e03469b06746fa1ebb52e | from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db import models
from django.db.models import Count
from django.test import TestCase, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
Base,
Child,
Derived,
Feature,
Item,
ItemAndSimpleItem,
Leaf,
Location,
OneToOneItem,
Proxy,
ProxyRelated,
RelatedItem,
Request,
ResolveThis,
SimpleItem,
SpecialFeature,
)
class DeferRegressionTest(TestCase):
def test_basic(self):
# Deferred fields should really be deferred and not accidentally use
# the field's default value just because they aren't passed to __init__
Item.objects.create(name="first", value=42)
obj = Item.objects.only("name", "other_value").get(name="first")
# Accessing "name" doesn't trigger a new database query. Accessing
# "value" or "text" should.
with self.assertNumQueries(0):
self.assertEqual(obj.name, "first")
self.assertEqual(obj.other_value, 0)
with self.assertNumQueries(1):
self.assertEqual(obj.value, 42)
with self.assertNumQueries(1):
self.assertEqual(obj.text, "xyzzy")
with self.assertNumQueries(0):
self.assertEqual(obj.text, "xyzzy")
# Regression test for #10695. Make sure different instances don't
# inadvertently share data in the deferred descriptor objects.
i = Item.objects.create(name="no I'm first", value=37)
items = Item.objects.only("value").order_by("-value")
self.assertEqual(items[0].name, "first")
self.assertEqual(items[1].name, "no I'm first")
RelatedItem.objects.create(item=i)
r = RelatedItem.objects.defer("item").get()
self.assertEqual(r.item_id, i.id)
self.assertEqual(r.item, i)
# Some further checks for select_related() and inherited model
# behavior (regression for #10710).
c1 = Child.objects.create(name="c1", value=42)
c2 = Child.objects.create(name="c2", value=37)
Leaf.objects.create(name="l1", child=c1, second_child=c2)
obj = Leaf.objects.only("name", "child").select_related()[0]
self.assertEqual(obj.child.name, "c1")
self.assertQuerysetEqual(
Leaf.objects.select_related().only("child__name", "second_child__name"),
[
"l1",
],
attrgetter("name"),
)
# Models instances with deferred fields should still return the same
# content types as their non-deferred versions (bug #10738).
ctype = ContentType.objects.get_for_model
c1 = ctype(Item.objects.all()[0])
c2 = ctype(Item.objects.defer("name")[0])
c3 = ctype(Item.objects.only("name")[0])
self.assertTrue(c1 is c2 is c3)
# Regression for #10733 - only() can be used on a model with two
# foreign keys.
results = Leaf.objects.only("name", "child", "second_child").select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
results = Leaf.objects.only(
"name", "child", "second_child", "child__name", "second_child__name"
).select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count("feature")).defer("name")), list
)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count("feature")).only("name")), list
)
@ignore_warnings(category=RemovedInDjango50Warning)
@override_settings(
SESSION_SERIALIZER="django.contrib.sessions.serializers.PickleSerializer"
)
def test_ticket_12163(self):
# Test for #12163 - Pickling error saving session with unsaved model
# instances.
SESSION_KEY = "2b1189a188b44ad18c35e1baac6ceead"
item = Item()
item._deferred = False
s = SessionStore(SESSION_KEY)
s.clear()
s["item"] = item
s.save(must_create=True)
s = SessionStore(SESSION_KEY)
s.modified = True
s.save()
i2 = s["item"]
self.assertFalse(i2._deferred)
def test_ticket_16409(self):
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count("feature")).defer("name")), list
)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count("feature")).only("name")), list
)
def test_ticket_23270(self):
d = Derived.objects.create(text="foo", other_text="bar")
with self.assertNumQueries(1):
obj = Base.objects.select_related("derived").defer("text")[0]
self.assertIsInstance(obj.derived, Derived)
self.assertEqual("bar", obj.derived.other_text)
self.assertNotIn("text", obj.__dict__)
self.assertEqual(d.pk, obj.derived.base_ptr_id)
def test_only_and_defer_usage_on_proxy_models(self):
# Regression for #15790 - only() broken for proxy models
proxy = Proxy.objects.create(name="proxy", value=42)
msg = "QuerySet.only() return bogus results with proxy models"
dp = Proxy.objects.only("other_value").get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
# also test things with .defer()
msg = "QuerySet.defer() return bogus results with proxy models"
dp = Proxy.objects.defer("name", "text", "value").get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
def test_resolve_columns(self):
ResolveThis.objects.create(num=5.0, name="Foobar")
qs = ResolveThis.objects.defer("num")
self.assertEqual(1, qs.count())
self.assertEqual("Foobar", qs[0].name)
def test_reverse_one_to_one_relations(self):
# Refs #14694. Test reverse relations which are known unique (reverse
# side has a OneToOneField or unique FK) - the o2o case.
item = Item.objects.create(name="first", value=42)
o2o = OneToOneItem.objects.create(item=item, name="second")
self.assertEqual(len(Item.objects.defer("one_to_one_item__name")), 1)
self.assertEqual(len(Item.objects.select_related("one_to_one_item")), 1)
self.assertEqual(
len(
Item.objects.select_related("one_to_one_item").defer(
"one_to_one_item__name"
)
),
1,
)
self.assertEqual(
len(Item.objects.select_related("one_to_one_item").defer("value")), 1
)
# Make sure that `only()` doesn't break when we pass in a unique relation
# rather than a field on the relation.
self.assertEqual(len(Item.objects.only("one_to_one_item")), 1)
with self.assertNumQueries(1):
i = Item.objects.select_related("one_to_one_item")[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
i = Item.objects.select_related("one_to_one_item").defer(
"value", "one_to_one_item__name"
)[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.name, "first")
with self.assertNumQueries(1):
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
self.assertEqual(i.value, 42)
def test_defer_with_select_related(self):
item1 = Item.objects.create(name="first", value=47)
item2 = Item.objects.create(name="second", value=42)
simple = SimpleItem.objects.create(name="simple", value="23")
ItemAndSimpleItem.objects.create(item=item1, simple=simple)
obj = ItemAndSimpleItem.objects.defer("item").select_related("simple").get()
self.assertEqual(obj.item, item1)
self.assertEqual(obj.item_id, item1.id)
obj.item = item2
obj.save()
obj = ItemAndSimpleItem.objects.defer("item").select_related("simple").get()
self.assertEqual(obj.item, item2)
self.assertEqual(obj.item_id, item2.id)
def test_proxy_model_defer_with_select_related(self):
# Regression for #22050
item = Item.objects.create(name="first", value=47)
RelatedItem.objects.create(item=item)
# Defer fields with only()
obj = ProxyRelated.objects.select_related().only("item__name")[0]
with self.assertNumQueries(0):
self.assertEqual(obj.item.name, "first")
with self.assertNumQueries(1):
self.assertEqual(obj.item.value, 47)
def test_only_with_select_related(self):
# Test for #17485.
item = SimpleItem.objects.create(name="first", value=47)
feature = Feature.objects.create(item=item)
SpecialFeature.objects.create(feature=feature)
qs = Feature.objects.only("item__name").select_related("item")
self.assertEqual(len(qs), 1)
qs = SpecialFeature.objects.only("feature__item__name").select_related(
"feature__item"
)
self.assertEqual(len(qs), 1)
def test_defer_annotate_select_related(self):
location = Location.objects.create()
Request.objects.create(location=location)
self.assertIsInstance(
list(
Request.objects.annotate(Count("items"))
.select_related("profile", "location")
.only("profile", "location")
),
list,
)
self.assertIsInstance(
list(
Request.objects.annotate(Count("items"))
.select_related("profile", "location")
.only("profile__profile1", "location__location1")
),
list,
)
self.assertIsInstance(
list(
Request.objects.annotate(Count("items"))
.select_related("profile", "location")
.defer("request1", "request2", "request3", "request4")
),
list,
)
def test_common_model_different_mask(self):
child = Child.objects.create(name="Child", value=42)
second_child = Child.objects.create(name="Second", value=64)
Leaf.objects.create(child=child, second_child=second_child)
with self.assertNumQueries(1):
leaf = (
Leaf.objects.select_related("child", "second_child")
.defer("child__name", "second_child__value")
.get()
)
self.assertEqual(leaf.child, child)
self.assertEqual(leaf.second_child, second_child)
self.assertEqual(leaf.child.get_deferred_fields(), {"name"})
self.assertEqual(leaf.second_child.get_deferred_fields(), {"value"})
with self.assertNumQueries(0):
self.assertEqual(leaf.child.value, 42)
self.assertEqual(leaf.second_child.name, "Second")
with self.assertNumQueries(1):
self.assertEqual(leaf.child.name, "Child")
with self.assertNumQueries(1):
self.assertEqual(leaf.second_child.value, 64)
class DeferDeletionSignalsTests(TestCase):
senders = [Item, Proxy]
@classmethod
def setUpTestData(cls):
cls.item_pk = Item.objects.create(value=1).pk
def setUp(self):
self.pre_delete_senders = []
self.post_delete_senders = []
for sender in self.senders:
models.signals.pre_delete.connect(self.pre_delete_receiver, sender)
models.signals.post_delete.connect(self.post_delete_receiver, sender)
def tearDown(self):
for sender in self.senders:
models.signals.pre_delete.disconnect(self.pre_delete_receiver, sender)
models.signals.post_delete.disconnect(self.post_delete_receiver, sender)
def pre_delete_receiver(self, sender, **kwargs):
self.pre_delete_senders.append(sender)
def post_delete_receiver(self, sender, **kwargs):
self.post_delete_senders.append(sender)
def test_delete_defered_model(self):
Item.objects.only("value").get(pk=self.item_pk).delete()
self.assertEqual(self.pre_delete_senders, [Item])
self.assertEqual(self.post_delete_senders, [Item])
def test_delete_defered_proxy_model(self):
Proxy.objects.only("value").get(pk=self.item_pk).delete()
self.assertEqual(self.pre_delete_senders, [Proxy])
self.assertEqual(self.post_delete_senders, [Proxy])
|
e7f5cceeecff6893bf60d30b8949af1409f26550a7afd3e5d7619c7ff7d23350 | import time
import unittest
from datetime import date, datetime
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models.fields.related_lookups import RelatedGreaterThan
from django.db.models.lookups import EndsWith, StartsWith
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import register_lookup
from django.utils import timezone
from .models import Article, Author, MySQLUnixTimestamp
class Div3Lookup(models.Lookup):
lookup_name = "div3"
def as_sql(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
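# "%%%%" survives this %-formatting as "%%", which the DB driver's own
# paramstyle interpolation later reduces to a literal "%".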
return "(%s) %%%% 3 = %s" % (lhs, rhs), params
def as_oracle(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
return "mod(%s, 3) = %s" % (lhs, rhs), params
class Div3Transform(models.Transform):
lookup_name = "div3"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "(%s) %%%% 3" % lhs, lhs_params
def as_oracle(self, compiler, connection, **extra_context):
lhs, lhs_params = compiler.compile(self.lhs)
return "mod(%s, 3)" % lhs, lhs_params
class Div3BilateralTransform(Div3Transform):
bilateral = True
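# bilateral=True applies the transform to both sides of the comparison,
# e.g. filter(age__div3=4) compares (age % 3) against (4 % 3).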
class Mult3BilateralTransform(models.Transform):
bilateral = True
lookup_name = "mult3"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "3 * (%s)" % lhs, lhs_params
class LastDigitTransform(models.Transform):
lookup_name = "lastdigit"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "SUBSTR(CAST(%s AS CHAR(2)), 2, 1)" % lhs, lhs_params
class UpperBilateralTransform(models.Transform):
bilateral = True
lookup_name = "upper"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "UPPER(%s)" % lhs, lhs_params
class YearTransform(models.Transform):
# Use a name that avoids collision with the built-in year lookup.
lookup_name = "testyear"
def as_sql(self, compiler, connection):
lhs_sql, params = compiler.compile(self.lhs)
return connection.ops.date_extract_sql("year", lhs_sql, params)
@property
def output_field(self):
return models.IntegerField()
@YearTransform.register_lookup
class YearExact(models.lookups.Lookup):
lookup_name = "exact"
def as_sql(self, compiler, connection):
# Skip the extract part and go directly to the originating field,
# i.e. self.lhs.lhs.
lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
# The params must appear in the same order as their placeholders
# in the SQL.
params = lhs_params + rhs_params + lhs_params + rhs_params
# PostgreSQL-specific SQL is used here. The conversions must happen
# in SQL rather than in Python to support F() references.
return (
"%(lhs)s >= (%(rhs)s || '-01-01')::date "
"AND %(lhs)s <= (%(rhs)s || '-12-31')::date"
% {"lhs": lhs_sql, "rhs": rhs_sql},
params,
)
@YearTransform.register_lookup
class YearLte(models.lookups.LessThanOrEqual):
"""
The purpose of this lookup is to efficiently compare the year of the field.
"""
def as_sql(self, compiler, connection):
# Skip the YearTransform above us (no possibility for efficient
# lookup otherwise).
real_lhs = self.lhs.lhs
lhs_sql, params = self.process_lhs(compiler, connection, real_lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
# Build SQL where the integer year is concatenated with the last
# month and day, then cast to date. The goal is SQL like
# WHERE somecol <= '2013-12-31', while still working when rhs_sql
# is a field reference.
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params
class Exactly(models.lookups.Exact):
"""
This lookup is used to test lookup registration.
"""
lookup_name = "exactly"
def get_rhs_op(self, connection, rhs):
return connection.operators["exact"] % rhs
class SQLFuncMixin:
def as_sql(self, compiler, connection):
return "%s()" % self.name, []
@property
def output_field(self):
return CustomField()
class SQLFuncLookup(SQLFuncMixin, models.Lookup):
def __init__(self, name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
class SQLFuncTransform(SQLFuncMixin, models.Transform):
def __init__(self, name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
class SQLFuncFactory:
def __init__(self, key, name):
self.key = key
self.name = name
def __call__(self, *args, **kwargs):
if self.key == "lookupfunc":
return SQLFuncLookup(self.name, *args, **kwargs)
return SQLFuncTransform(self.name, *args, **kwargs)
class CustomField(models.TextField):
def get_lookup(self, lookup_name):
if lookup_name.startswith("lookupfunc_"):
key, name = lookup_name.split("_", 1)
return SQLFuncFactory(key, name)
return super().get_lookup(lookup_name)
def get_transform(self, lookup_name):
if lookup_name.startswith("transformfunc_"):
key, name = lookup_name.split("_", 1)
return SQLFuncFactory(key, name)
return super().get_transform(lookup_name)
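# With CustomField, lookup names prefixed "lookupfunc_" / "transformfunc_"
# compile to SQL function calls, e.g. field__lookupfunc_monkeys -> monkeys()
# (exercised by CustomisedMethodsTests below).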
class CustomModel(models.Model):
field = CustomField()
# We will register this class temporarily in the test method.
class InMonth(models.lookups.Lookup):
"""
InMonth matches if the column's month is the same as value's month.
"""
lookup_name = "inmonth"
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# The params must land in the right places relative to their
# placeholders.
params = lhs_params + rhs_params + lhs_params + rhs_params
return (
"%s >= date_trunc('month', %s) and "
"%s < date_trunc('month', %s) + interval '1 months'" % (lhs, rhs, lhs, rhs),
params,
)
class DateTimeTransform(models.Transform):
lookup_name = "as_datetime"
@property
def output_field(self):
return models.DateTimeField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return "from_unixtime({})".format(lhs), params
class CustomStartsWith(StartsWith):
lookup_name = "sw"
class CustomEndsWith(EndsWith):
lookup_name = "ew"
class RelatedMoreThan(RelatedGreaterThan):
lookup_name = "rmt"
class LookupTests(TestCase):
def test_custom_name_lookup(self):
a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16))
Author.objects.create(name="a2", birthdate=date(2012, 2, 29))
with register_lookup(models.DateField, YearTransform), register_lookup(
models.DateField, YearTransform, lookup_name="justtheyear"
), register_lookup(YearTransform, Exactly), register_lookup(
YearTransform, Exactly, lookup_name="isactually"
):
qs1 = Author.objects.filter(birthdate__testyear__exactly=1981)
qs2 = Author.objects.filter(birthdate__justtheyear__isactually=1981)
self.assertSequenceEqual(qs1, [a1])
self.assertSequenceEqual(qs2, [a1])
def test_custom_exact_lookup_none_rhs(self):
"""
__exact=None is transformed to __isnull=True if a custom lookup class
with lookup_name != 'exact' is registered as the `exact` lookup.
"""
field = Author._meta.get_field("birthdate")
OldExactLookup = field.get_lookup("exact")
author = Author.objects.create(name="author", birthdate=None)
try:
field.register_lookup(Exactly, "exact")
self.assertEqual(Author.objects.get(birthdate__exact=None), author)
finally:
field.register_lookup(OldExactLookup, "exact")
def test_basic_lookup(self):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
with register_lookup(models.IntegerField, Div3Lookup):
self.assertSequenceEqual(Author.objects.filter(age__div3=0), [a3])
self.assertSequenceEqual(
Author.objects.filter(age__div3=1).order_by("age"), [a1, a4]
)
self.assertSequenceEqual(Author.objects.filter(age__div3=2), [a2])
self.assertSequenceEqual(Author.objects.filter(age__div3=3), [])
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific SQL used"
)
def test_birthdate_month(self):
a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16))
a2 = Author.objects.create(name="a2", birthdate=date(2012, 2, 29))
a3 = Author.objects.create(name="a3", birthdate=date(2012, 1, 31))
a4 = Author.objects.create(name="a4", birthdate=date(2012, 3, 1))
with register_lookup(models.DateField, InMonth):
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)), [a3]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)), [a2]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)), [a1]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)), [a4]
)
self.assertSequenceEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)), []
)
def test_div3_extract(self):
with register_lookup(models.IntegerField, Div3Transform):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a1, a2, a3, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [])
self.assertSequenceEqual(
baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4]
)
def test_foreignobject_lookup_registration(self):
field = Article._meta.get_field("author")
with register_lookup(models.ForeignObject, Exactly):
self.assertIs(field.get_lookup("exactly"), Exactly)
# ForeignObject should ignore regular Field lookups
with register_lookup(models.Field, Exactly):
self.assertIsNone(field.get_lookup("exactly"))
def test_lookups_caching(self):
field = Article._meta.get_field("author")
# Clear and re-cache.
field.get_class_lookups.cache_clear()
self.assertNotIn("exactly", field.get_lookups())
# Registration should bust the cache.
with register_lookup(models.ForeignObject, Exactly):
# Getting the lookups again should re-cache.
self.assertIn("exactly", field.get_lookups())
# Unregistration should bust the cache.
self.assertNotIn("exactly", field.get_lookups())
class BilateralTransformTests(TestCase):
def test_bilateral_upper(self):
with register_lookup(models.CharField, UpperBilateralTransform):
author1 = Author.objects.create(name="Doe")
author2 = Author.objects.create(name="doe")
author3 = Author.objects.create(name="Foo")
self.assertCountEqual(
Author.objects.filter(name__upper="doe"),
[author1, author2],
)
self.assertSequenceEqual(
Author.objects.filter(name__upper__contains="f"),
[author3],
)
def test_bilateral_inner_qs(self):
with register_lookup(models.CharField, UpperBilateralTransform):
msg = "Bilateral transformations on nested querysets are not implemented."
with self.assertRaisesMessage(NotImplementedError, msg):
Author.objects.filter(
name__upper__in=Author.objects.values_list("name")
)
def test_bilateral_multi_value(self):
with register_lookup(models.CharField, UpperBilateralTransform):
Author.objects.bulk_create(
[
Author(name="Foo"),
Author(name="Bar"),
Author(name="Ray"),
]
)
self.assertQuerysetEqual(
Author.objects.filter(name__upper__in=["foo", "bar", "doe"]).order_by(
"name"
),
["Bar", "Foo"],
lambda a: a.name,
)
def test_div3_bilateral_extract(self):
with register_lookup(models.IntegerField, Div3BilateralTransform):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2])
self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3])
self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a1, a2, a4])
self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [a1, a2, a3, a4])
self.assertSequenceEqual(
baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4]
)
def test_bilateral_order(self):
with register_lookup(
models.IntegerField, Mult3BilateralTransform, Div3BilateralTransform
):
a1 = Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
a3 = Author.objects.create(name="a3", age=3)
a4 = Author.objects.create(name="a4", age=4)
baseqs = Author.objects.order_by("name")
# mult3__div3 always leads to 0
self.assertSequenceEqual(
baseqs.filter(age__mult3__div3=42), [a1, a2, a3, a4]
)
self.assertSequenceEqual(baseqs.filter(age__div3__mult3=42), [a3])
def test_transform_order_by(self):
with register_lookup(models.IntegerField, LastDigitTransform):
a1 = Author.objects.create(name="a1", age=11)
a2 = Author.objects.create(name="a2", age=23)
a3 = Author.objects.create(name="a3", age=32)
a4 = Author.objects.create(name="a4", age=40)
qs = Author.objects.order_by("age__lastdigit")
self.assertSequenceEqual(qs, [a4, a1, a3, a2])
def test_bilateral_fexpr(self):
with register_lookup(models.IntegerField, Mult3BilateralTransform):
a1 = Author.objects.create(name="a1", age=1, average_rating=3.2)
a2 = Author.objects.create(name="a2", age=2, average_rating=0.5)
a3 = Author.objects.create(name="a3", age=3, average_rating=1.5)
a4 = Author.objects.create(name="a4", age=4)
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(
baseqs.filter(age__mult3=models.F("age")), [a1, a2, a3, a4]
)
# Same as age >= average_rating
self.assertSequenceEqual(
baseqs.filter(age__mult3__gte=models.F("average_rating")), [a2, a3]
)
@override_settings(USE_TZ=True)
class DateTimeLookupTests(TestCase):
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific SQL used")
def test_datetime_output_field(self):
with register_lookup(models.PositiveIntegerField, DateTimeTransform):
ut = MySQLUnixTimestamp.objects.create(timestamp=time.time())
y2k = timezone.make_aware(datetime(2000, 1, 1))
self.assertSequenceEqual(
MySQLUnixTimestamp.objects.filter(timestamp__as_datetime__gt=y2k), [ut]
)
class YearLteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16))
cls.a2 = Author.objects.create(name="a2", birthdate=date(2012, 2, 29))
cls.a3 = Author.objects.create(name="a3", birthdate=date(2012, 1, 31))
cls.a4 = Author.objects.create(name="a4", birthdate=date(2012, 3, 1))
def setUp(self):
models.DateField.register_lookup(YearTransform)
def tearDown(self):
models.DateField._unregister_lookup(YearTransform)
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific SQL used"
)
def test_year_lte(self):
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(
baseqs.filter(birthdate__testyear__lte=2012),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertSequenceEqual(
baseqs.filter(birthdate__testyear=2012), [self.a2, self.a3, self.a4]
)
self.assertNotIn("BETWEEN", str(baseqs.filter(birthdate__testyear=2012).query))
self.assertSequenceEqual(
baseqs.filter(birthdate__testyear__lte=2011), [self.a1]
)
# The non-optimized version works, too.
self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lt=2012), [self.a1])
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific SQL used"
)
def test_year_lte_fexpr(self):
self.a2.age = 2011
self.a2.save()
self.a3.age = 2012
self.a3.save()
self.a4.age = 2013
self.a4.save()
baseqs = Author.objects.order_by("name")
self.assertSequenceEqual(
baseqs.filter(birthdate__testyear__lte=models.F("age")), [self.a3, self.a4]
)
self.assertSequenceEqual(
baseqs.filter(birthdate__testyear__lt=models.F("age")), [self.a4]
)
def test_year_lte_sql(self):
# This test only checks the generated SQL for __lte. It doesn't
# require running on PostgreSQL and catches the most likely
# error: YearLte's SQL not being used at all.
baseqs = Author.objects.order_by("name")
self.assertIn(
"<= (2011 || ", str(baseqs.filter(birthdate__testyear__lte=2011).query)
)
self.assertIn("-12-31", str(baseqs.filter(birthdate__testyear__lte=2011).query))
def test_postgres_year_exact(self):
baseqs = Author.objects.order_by("name")
self.assertIn("= (2011 || ", str(baseqs.filter(birthdate__testyear=2011).query))
self.assertIn("-12-31", str(baseqs.filter(birthdate__testyear=2011).query))
def test_custom_implementation_year_exact(self):
try:
# Two ways to add a customized implementation for different backends:
# the first is monkey patching the class.
def as_custom_sql(self, compiler, connection):
lhs_sql, lhs_params = self.process_lhs(
compiler, connection, self.lhs.lhs
)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return (
"%(lhs)s >= "
"str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= "
"str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')"
% {"lhs": lhs_sql, "rhs": rhs_sql},
params,
)
setattr(YearExact, "as_" + connection.vendor, as_custom_sql)
self.assertIn(
"concat(", str(Author.objects.filter(birthdate__testyear=2012).query)
)
finally:
delattr(YearExact, "as_" + connection.vendor)
try:
# The other way is to subclass the original lookup and register the
# subclassed lookup instead of the original.
class CustomYearExact(YearExact):
# This method should be named "as_mysql" for MySQL,
# "as_postgresql" for PostgreSQL, and so on, but since we don't
# know which DB we are running on, we need to use setattr.
def as_custom_sql(self, compiler, connection):
lhs_sql, lhs_params = self.process_lhs(
compiler, connection, self.lhs.lhs
)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return (
"%(lhs)s >= "
"str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= "
"str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')"
% {"lhs": lhs_sql, "rhs": rhs_sql},
params,
)
setattr(
CustomYearExact,
"as_" + connection.vendor,
CustomYearExact.as_custom_sql,
)
YearTransform.register_lookup(CustomYearExact)
self.assertIn(
"CONCAT(", str(Author.objects.filter(birthdate__testyear=2012).query)
)
finally:
YearTransform._unregister_lookup(CustomYearExact)
YearTransform.register_lookup(YearExact)
class TrackCallsYearTransform(YearTransform):
# Use a name that avoids collision with the built-in year lookup.
lookup_name = "testyear"
call_order = []
def as_sql(self, compiler, connection):
lhs_sql, params = compiler.compile(self.lhs)
return connection.ops.date_extract_sql("year", lhs_sql, params)
@property
def output_field(self):
return models.IntegerField()
def get_lookup(self, lookup_name):
self.call_order.append("lookup")
return super().get_lookup(lookup_name)
def get_transform(self, lookup_name):
self.call_order.append("transform")
return super().get_transform(lookup_name)
class LookupTransformCallOrderTests(SimpleTestCase):
def test_call_order(self):
with register_lookup(models.DateField, TrackCallsYearTransform):
# junk lookup - tries lookup, then transform, then fails
msg = (
"Unsupported lookup 'junk' for IntegerField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(birthdate__testyear__junk=2012)
self.assertEqual(
TrackCallsYearTransform.call_order, ["lookup", "transform"]
)
TrackCallsYearTransform.call_order = []
# junk transform - tries transform only, then fails
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(birthdate__testyear__junk__more_junk=2012)
self.assertEqual(TrackCallsYearTransform.call_order, ["transform"])
TrackCallsYearTransform.call_order = []
# Just getting the year (implied __exact) - lookup only
Author.objects.filter(birthdate__testyear=2012)
self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"])
TrackCallsYearTransform.call_order = []
# Just getting the year (explicit __exact) - lookup only
Author.objects.filter(birthdate__testyear__exact=2012)
self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"])
class CustomisedMethodsTests(SimpleTestCase):
def test_overridden_get_lookup(self):
q = CustomModel.objects.filter(field__lookupfunc_monkeys=3)
self.assertIn("monkeys()", str(q.query))
def test_overridden_get_transform(self):
q = CustomModel.objects.filter(field__transformfunc_banana=3)
self.assertIn("banana()", str(q.query))
def test_overridden_get_lookup_chain(self):
q = CustomModel.objects.filter(
field__transformfunc_banana__lookupfunc_elephants=3
)
self.assertIn("elephants()", str(q.query))
def test_overridden_get_transform_chain(self):
q = CustomModel.objects.filter(
field__transformfunc_banana__transformfunc_pear=3
)
self.assertIn("pear()", str(q.query))
class SubqueryTransformTests(TestCase):
def test_subquery_usage(self):
with register_lookup(models.IntegerField, Div3Transform):
Author.objects.create(name="a1", age=1)
a2 = Author.objects.create(name="a2", age=2)
Author.objects.create(name="a3", age=3)
Author.objects.create(name="a4", age=4)
qs = Author.objects.order_by("name").filter(
id__in=Author.objects.filter(age__div3=2)
)
self.assertSequenceEqual(qs, [a2])
class RegisterLookupTests(SimpleTestCase):
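# register_lookup() (django.test.utils) is a context manager: it registers
# the lookup on the given field or class and unregisters it again on exit.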
def test_class_lookup(self):
author_name = Author._meta.get_field("name")
with register_lookup(models.CharField, CustomStartsWith):
self.assertEqual(author_name.get_lookup("sw"), CustomStartsWith)
self.assertIsNone(author_name.get_lookup("sw"))
def test_instance_lookup(self):
author_name = Author._meta.get_field("name")
author_alias = Author._meta.get_field("alias")
with register_lookup(author_name, CustomStartsWith):
self.assertEqual(author_name.instance_lookups, {"sw": CustomStartsWith})
self.assertEqual(author_name.get_lookup("sw"), CustomStartsWith)
self.assertIsNone(author_alias.get_lookup("sw"))
self.assertIsNone(author_name.get_lookup("sw"))
self.assertEqual(author_name.instance_lookups, {})
self.assertIsNone(author_alias.get_lookup("sw"))
def test_instance_lookup_override_class_lookups(self):
author_name = Author._meta.get_field("name")
author_alias = Author._meta.get_field("alias")
with register_lookup(models.CharField, CustomStartsWith, lookup_name="st_end"):
with register_lookup(author_alias, CustomEndsWith, lookup_name="st_end"):
self.assertEqual(author_name.get_lookup("st_end"), CustomStartsWith)
self.assertEqual(author_alias.get_lookup("st_end"), CustomEndsWith)
self.assertEqual(author_name.get_lookup("st_end"), CustomStartsWith)
self.assertEqual(author_alias.get_lookup("st_end"), CustomStartsWith)
self.assertIsNone(author_name.get_lookup("st_end"))
self.assertIsNone(author_alias.get_lookup("st_end"))
def test_instance_lookup_override(self):
author_name = Author._meta.get_field("name")
with register_lookup(author_name, CustomStartsWith, lookup_name="st_end"):
self.assertEqual(author_name.get_lookup("st_end"), CustomStartsWith)
author_name.register_lookup(CustomEndsWith, lookup_name="st_end")
self.assertEqual(author_name.get_lookup("st_end"), CustomEndsWith)
self.assertIsNone(author_name.get_lookup("st_end"))
def test_lookup_on_transform(self):
transform = Div3Transform
with register_lookup(Div3Transform, CustomStartsWith):
with register_lookup(Div3Transform, CustomEndsWith):
self.assertEqual(
transform.get_lookups(),
{"sw": CustomStartsWith, "ew": CustomEndsWith},
)
self.assertEqual(transform.get_lookups(), {"sw": CustomStartsWith})
self.assertEqual(transform.get_lookups(), {})
def test_transform_on_field(self):
author_name = Author._meta.get_field("name")
author_alias = Author._meta.get_field("alias")
with register_lookup(models.CharField, Div3Transform):
self.assertEqual(author_alias.get_transform("div3"), Div3Transform)
self.assertEqual(author_name.get_transform("div3"), Div3Transform)
with register_lookup(author_alias, Div3Transform):
self.assertEqual(author_alias.get_transform("div3"), Div3Transform)
self.assertIsNone(author_name.get_transform("div3"))
self.assertIsNone(author_alias.get_transform("div3"))
self.assertIsNone(author_name.get_transform("div3"))
def test_related_lookup(self):
article_author = Article._meta.get_field("author")
with register_lookup(models.Field, CustomStartsWith):
self.assertIsNone(article_author.get_lookup("sw"))
with register_lookup(models.ForeignKey, RelatedMoreThan):
self.assertEqual(article_author.get_lookup("rmt"), RelatedMoreThan)
def test_instance_related_lookup(self):
article_author = Article._meta.get_field("author")
with register_lookup(article_author, RelatedMoreThan):
self.assertEqual(article_author.get_lookup("rmt"), RelatedMoreThan)
self.assertIsNone(article_author.get_lookup("rmt"))
|
2206769b0f9326ebb549474ac0ff0ef1d0df74ad945a2bf18a05e54fab5696d3 | from django.core.exceptions import FieldDoesNotExist, FieldError
from django.test import SimpleTestCase, TestCase
from .models import (
BigChild,
Child,
ChildProxy,
Primary,
RefreshPrimaryProxy,
Secondary,
ShadowChild,
)
class AssertionMixin:
def assert_delayed(self, obj, num):
"""
Instances with deferred fields look the same as normal instances when
attribute values are examined. Therefore, this method checks the
number of deferred fields on the instance instead.
"""
count = len(obj.get_deferred_fields())
self.assertEqual(count, num)
class DeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
def test_defer(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.defer("related__first")[0], 0)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
def test_only(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("related__first")[0], 2)
# Using 'pk' with only() should result in 3 deferred fields, namely all
# of them except the model's primary key; see #15494.
self.assert_delayed(qs.only("pk")[0], 3)
# You can use 'pk' with reverse foreign key lookups.
# The related_id is always set even if it's not fetched from the DB,
# so pk and related_id are not deferred.
self.assert_delayed(self.s1.primary_set.only("pk")[0], 2)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("name").only("value")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
self.assert_delayed(qs.only("name").defer("name").defer("value")[0], 1)
self.assert_delayed(qs.only("name").defer("name", "value")[0], 1)
def test_defer_only_clear(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name").defer("name")[0], 0)
self.assert_delayed(qs.defer("name").only("name")[0], 0)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = "Cannot pass None as an argument to only()."
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
# Using values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(
Primary.objects.defer("name").values()[0],
{
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
},
)
def test_only_values_does_not_defer(self):
self.assertEqual(
Primary.objects.only("name").values()[0],
{
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
},
)
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer(
"related__first", "related__second"
)[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# select_related() overrides defer().
with self.assertNumQueries(1):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(),
[
"p1",
"a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# On an inherited model, the parent link's PK is also fetched, hence '3'
# deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_defer_of_overridden_scalar(self):
ShadowChild.objects.create()
obj = ShadowChild.objects.defer("name").get()
self.assertEqual(obj.name, "adonis")
class BigChildDeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
def test_defer_baseclass_when_subclass_has_added_field(self):
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass(self):
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass_both(self):
# Deferring fields from both superclass and subclass works.
obj = BigChild.objects.defer("other", "value").get(name="b1")
self.assert_delayed(obj, 2)
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
# On an inherited model, the parent link's PK is also fetched, hence '4'
# deferred fields.
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_subclass(self):
# You can retrieve a single field on a subclass
obj = BigChild.objects.only("other").get(name="b1")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first="x1", second="x2")
ChildProxy.objects.create(name="p1", value="xx", related=related)
children = ChildProxy.objects.select_related().only("id", "name")
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, "p1")
self.assertEqual(child.value, "xx")
def test_defer_inheritance_pk_chaining(self):
"""
When an inherited model is fetched from the DB, its PK is also fetched.
When getting the PK of the parent model it is useful to use the already
fetched parent model PK if it happens to be available.
"""
s1 = Secondary.objects.create(first="x1", second="y1")
bc = BigChild.objects.create(name="b1", value="foo", related=s1, other="bar")
bc_deferred = BigChild.objects.only("name").get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
def test_eq(self):
s1 = Secondary.objects.create(first="x1", second="y1")
s1_defer = Secondary.objects.only("pk").get(pk=s1.pk)
self.assertEqual(s1, s1_defer)
self.assertEqual(s1_defer, s1)
def test_refresh_not_loading_deferred_fields(self):
s = Secondary.objects.create()
rf = Primary.objects.create(name="foo", value="bar", related=s)
rf2 = Primary.objects.only("related", "value").get()
rf.name = "new foo"
rf.value = "new bar"
rf.save()
with self.assertNumQueries(1):
rf2.refresh_from_db()
self.assertEqual(rf2.value, "new bar")
with self.assertNumQueries(1):
self.assertEqual(rf2.name, "new foo")
def test_custom_refresh_on_deferred_loading(self):
s = Secondary.objects.create()
rf = RefreshPrimaryProxy.objects.create(name="foo", value="bar", related=s)
rf2 = RefreshPrimaryProxy.objects.only("related").get()
rf.name = "new foo"
rf.value = "new bar"
rf.save()
with self.assertNumQueries(1):
# Customized refresh_from_db() reloads all deferred fields on
# access of any of them.
self.assertEqual(rf2.name, "new foo")
self.assertEqual(rf2.value, "new bar")
class InvalidDeferTests(SimpleTestCase):
def test_invalid_defer(self):
msg = "Primary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.defer("missing"))
with self.assertRaisesMessage(FieldError, "missing"):
list(Primary.objects.defer("value__missing"))
msg = "Secondary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.defer("related__missing"))
def test_invalid_only(self):
msg = "Primary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.only("missing"))
with self.assertRaisesMessage(FieldError, "missing"):
list(Primary.objects.only("value__missing"))
msg = "Secondary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.only("related__missing"))
def test_defer_select_related_raises_invalid_query(self):
msg = (
"Field Primary.related cannot be both deferred and traversed using "
"select_related at the same time."
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
msg = (
"Field Primary.related cannot be both deferred and traversed using "
"select_related at the same time."
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.only("name").select_related("related")[0]
|
f06d5c6a84e9114aaf75a1bace00f9a101f57ff06a42211c1e7972f2c54294cc | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
Value,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
# SQL comments are forbidden in column aliases.
FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")
# Inspired by
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
def get_field_names_from_opts(opts):
if opts is None:
return set()
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
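# For example (illustrative sketch): with q = Q(a=1) | (Q(b=2) & Q(c=3)),
# iterating get_children_from_q(q) yields the leaf tuples ('a', 1),
# ('b', 2), and ('c', 3), flattening away the connector/negation tree.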
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
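# Minimal usage sketch (assumptions: a configured "default" database alias
# and a myapp_person table exist); RawQuery is what backs Manager.raw():
#
#     rq = RawQuery(
#         "SELECT id, name FROM myapp_person WHERE id = %s",
#         using="default",
#         params=(1,),
#     )
#     rq.get_columns()   # e.g. ['id', 'name']
#     for row in rq:     # iterating executes a fresh query
#         ...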
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain other than default fields (values(),
# subqueries...). Note that annotations go to annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
has_select_fields = False
# Arbitrary limit for select_related to prevent infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
# A tuple of a set of model field names and a boolean: True if the named
# fields are the ones to defer, False if they are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
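# Illustrative sketch (assuming an Author model with a CharField "name"):
#
#     query = Author.objects.filter(name="a").query
#     sql, params = query.sql_with_params()
#     # sql -> 'SELECT ... WHERE "app_author"."name" = %s'; params -> ('a',)
#     str(query)  # the same SQL with params naively substituted in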
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
if self.model:
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
# It's already a Ref to a subquery (see resolve_ref() for
# details).
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
# An expression that is not selected in the subquery.
if isinstance(expr, Col) or (
expr.contains_aggregate and not expr.is_summary
):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = "__col%d" % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation
for alias, annotation in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or existing_annotations
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering, and when a limit is
# applied we must take the slice from the ordered query. Otherwise,
# there is no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation
for annotation in existing_annotations
if getattr(annotation, "contains_aggregate", True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
relabels = {t: "subquery" for t in inner_query.alias_map}
relabels[None] = "subquery"
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(
relabels
)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
# Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count("*"), alias="__count", is_summary=True)
return obj.get_aggregation(using, ["__count"])["__count"]
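# At the QuerySet level (sketch), qs.count() routes through get_count(),
# which behaves like qs.aggregate(__count=Count("*"))["__count"] but is
# guaranteed to return an int (Count's empty_result_set_value is 0).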
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not (q.distinct and q.is_sliced):
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
limit_combined = connections[
using
].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_annotation(Value(1), "a")
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
for option_name in options:
if (
not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
or "--" in option_name
):
raise ValueError(f"Invalid option name: {option_name!r}.")
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
# If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Combine subquery aliases to ensure alias relabelling properly
# handles subqueries when combining the where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
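# combine() is what backs QuerySet composition (sketch): qs1 & qs2 merges
# the underlying queries with connector=AND, while qs1 | qs2 uses OR; per
# the ordering rules above, the combined query keeps rhs's order_by
# whenever rhs has one.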
def _get_defer_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# All concrete fields that are not part of the defer mask must be
# loaded. If a relational field is encountered, it gets added to the
# mask so it can be considered by `select_related`, and the cycle
# continues by recursively calling this function.
for field in opts.concrete_fields:
field_mask = mask.pop(field.name, None)
if field_mask is None:
select_mask.setdefault(field, {})
elif field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
field_select_mask = select_mask.setdefault(field, {})
related_model = field.remote_field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
# Remaining defer entries must be references to reverse relationships.
# The following code is expected to raise FieldError if it encounters
# a malformed defer entry.
for field_name, field_mask in mask.items():
if filtered_relation := self._filtered_relations.get(field_name):
relation = opts.get_field(filtered_relation.relation_name)
field_select_mask = select_mask.setdefault((field_name, relation), {})
field = relation.field
else:
field = opts.get_field(field_name).field
field_select_mask = select_mask.setdefault(field, {})
related_model = field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def _get_only_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# Only include fields mentioned in the mask.
for field_name, field_mask in mask.items():
field = opts.get_field(field_name)
field_select_mask = select_mask.setdefault(field, {})
if field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
related_model = field.remote_field.model._meta.concrete_model
self._get_only_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def get_select_mask(self):
"""
Convert the self.deferred_loading data structure to an alternate data
structure describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
"""
field_names, defer = self.deferred_loading
if not field_names:
return {}
mask = {}
for field_name in field_names:
part_mask = mask
for part in field_name.split(LOOKUP_SEP):
part_mask = part_mask.setdefault(part, {})
opts = self.get_meta()
if defer:
return self._get_defer_select_mask(opts, mask)
return self._get_only_select_mask(opts, mask)
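# For example (sketch; assumption: Secondary has a field named 'first'):
# Primary.objects.defer("related__first") stores
# deferred_loading = ({"related__first"}, True). get_select_mask() first
# builds the nested name mask {"related": {"first": {}}} and then maps it
# onto field instances, keeping roughly every concrete field except
# Secondary.first in the resulting select mask.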
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Recursively promote the join type of the given aliases and their
children to an outer join. Only promote a join if it is nullable or the
parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the other query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call. To prevent changing aliases use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
exclude = {}
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
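# Sketch of the sequence for alias_prefix "T": prefix_gen() above yields
# 'U', 'V', ..., 'Z', then the two-letter products 'AA', 'AB', ... and so
# on, stopping at the first prefix not already claimed in subq_aliases.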
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
elif self.model:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
else:
alias = None
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
]
else:
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
# Proxy models have elements in the base chain with no parents;
# assign the new options object and skip to the next base in that
# case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def check_alias(self, alias):
if FORBIDDEN_ALIAS_PATTERN.search(alias):
raise ValueError(
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
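# For example (sketch): check_alias('num_books') passes silently, while
# check_alias('total"; DROP TABLE x; --') raises ValueError, keeping
# user-supplied annotate()/values()/extra() aliases out of the SQL.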
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
self.check_alias(alias)
annotation = annotation.resolve_expression(
self, allow_joins=True, reuse=None, summarize=is_summary
)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# an unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
for query in self.combined_queries:
query.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
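# Examples (sketch, assuming Book has an FK 'author' to a model with a
# CharField 'name'):
#
#     solve_lookup_type('author__name__icontains')
#     # -> (['icontains'], ['author', 'name'], False)
#
# and, when 'total' is an annotation alias on this query:
#
#     solve_lookup_type('total__gt')
#     # -> (['gt'], (), <resolved annotation expression>)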
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the SQL 'IS NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
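# Examples (sketch): build_lookup(['year', 'gt'], col, 2000) applies the
# 'year' transform to col and returns a GreaterThan lookup;
# build_lookup([], col, 5) falls back to the implicit 'exact' lookup; and
# an exact/iexact lookup against None is rewritten to IsNull(col, True),
# per the special-casing above.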
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
reuse_with_filtered_relation=False,
check_filterable=True,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the call chain by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
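# QuerySet-level usage sketch (assumption: Author has a reverse relation
# 'book' with 'title' and 'year' fields):
#
#     Author.objects.annotate(
#         recent_book=FilteredRelation(
#             'book', condition=Q(book__year__gte=2020),
#         ),
#     ).filter(recent_book__title__icontains='django')
#
# add_filtered_relation() records the relation under its alias after
# rejecting lookups in relation_name and conditions that stray outside it.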
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
if opts is None:
raise FieldDoesNotExist
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if opts is not None and model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
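# For example (sketch): names_to_path(['author', 'name', 'icontains'],
# opts) returns the PathInfos for the join to author, final_field/targets
# for the 'name' field, and the leftover ['icontains'] as the unresolved
# trailing part (a transform or lookup).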
def setup_joins(
self,
names,
opts,
alias,
can_reuse=None,
allow_many=True,
reuse_with_filtered_relation=False,
):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'reuse_with_filtered_relation' is True, join reuse is restricted to
the aliases in 'can_reuse', which is required when computing joins for
FilteredRelation instances.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
        The target field is the field containing the concrete value. The
        final field can be something different, for example a foreign key
        pointing to that value; it is needed for some value conversions (e.g.
        converting 'obj' in fk__id=obj to a pk value using the foreign key
        field).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection,
reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
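        # Illustrative note: an F('author__name') reference resolves here;
        # annotation aliases are returned directly (or as a Ref when
        # summarizing), while plain field paths go through setup_joins().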
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there is no existing joins, use outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it isn't valid.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
self.check_alias(name)
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
self.has_select_fields = True
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the field on the other side.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt on this for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
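        # Illustrative note: on a backend where
        # interprets_empty_strings_as_nulls is True (historically Oracle),
        # a CharField with null=False is still treated as nullable here
        # because '' is stored as NULL.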
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example a
            # reverse foreign key with no rows, or a null value in a direct
            # foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == OR and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == AND or (
self.effective_connector == OR and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
788de15e4dde38f82e155509ad62b8cfc245fa114e8e32b80f38ef82d4c220da
import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); they are set as a side effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
        # In fact, self.query.group_by is the minimal set to GROUP BY. It can
        # never be restricted to a smaller set, but additional columns in
        # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
        # the end result is that it is impossible to force the query to have
        # a chosen GROUP BY clause - you can almost do this by using the form:
        # .values(*wanted_cols).annotate(AnAggregate())
        # but any later annotations, extra selects, values calls that refer to
        # some column outside of the wanted_cols, order_by, or even filter
        # calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
try:
sql, params = self.compile(expr)
except EmptyResultSet:
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
                # Is this a reference to the query's base table primary key?
                # If the expression isn't Col-like, skip it.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
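        # Dispatch on the database vendor: a node that defines, e.g.,
        # as_postgresql() is compiled through that method; otherwise the
        # generic as_sql() is used.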
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
select_aliases = set(select.values())
qual_aliases = set()
replacements = {}
def collect_replacements(expressions):
while expressions:
expr = expressions.pop()
if expr in replacements:
continue
elif select_alias := select.get(expr):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
elif isinstance(expr, Ref):
if expr.refs not in select_aliases:
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
collect_replacements(list(self.qualify.leaves()))
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
order_by = []
for order_by_expr, *_ in self.get_order_by():
collect_replacements(order_by_expr.get_source_expressions())
order_by.append(
order_by_expr.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
params = list(inner_params) + qualify_params
        # As the SQL spec is unclear on whether or not a derived table's
        # ordering must propagate, it has to be explicitly repeated on the
        # outermost query to ensure it's preserved.
if order_by:
ordering_sqls = []
for ordering in order_by:
ordering_sql, ordering_params = self.compile(ordering)
ordering_sqls.append(ordering_sql)
params.extend(ordering_params)
result.extend(["ORDER BY", ", ".join(ordering_sqls)])
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases,
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions suitable for inclusion in the
        SELECT clause, resolved against the appropriate table aliases.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in the DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
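# Illustrative sketch (assumes PostgreSQL, the only core backend with
# can_distinct_on_fields), with a hypothetical Author model:
#
#     Author.objects.order_by("name").distinct("name")
#
# makes get_distinct() return the quoted column so the compiler can emit
#
#     SELECT DISTINCT ON ("author"."name") ... ORDER BY "author"."name" ASC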
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
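# Hedged example of the related-ordering rule above, with hypothetical
# models:
#
#     class Author(models.Model):
#         name = models.CharField(max_length=100)
#
#         class Meta:
#             ordering = ["name"]
#
#     class Book(models.Model):
#         author = models.ForeignKey(Author, models.CASCADE)
#
# Book.objects.order_by("author") expands to Author's default ordering
# (author__name), while order_by("author_id") (the attname) or
# order_by("pk") bypasses the expansion, as checked above.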
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_order_by() and get_distinct() must produce the same target columns
on the same input, as their prefixes must match. Executing SQL where
this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
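# Illustration (hypothetical query) of the klass_info dicts built above:
# Book.objects.select_related("author") adds an entry roughly like
#
#     {"model": Author, "field": <Book.author field>, "reverse": False,
#      "local_setter": <caches Author on Book>, "remote_setter": ...,
#      "from_parent": False, "select_fields": [3, 4],
#      "related_klass_infos": []}
#
# where "select_fields" holds indexes into self.select for the columns
# added for Author.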
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If one doesn't exist,
don't lock the model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
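# Illustration (hypothetical models): with
#
#     Book.objects.select_related("author").select_for_update(
#         of=("self", "author")
#     )
#
# each name is resolved to the first selected column of its model, and
# the backend's flag decides the rendering:
#
#     FOR UPDATE OF "book"."id", "author"."id"  -- select_for_update_of_column
#     FOR UPDATE OF "book", "author"            -- table/alias form otherwise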
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
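# Sketch of the converter protocol consumed above: each converter is a
# callable taking (value, expression, connection) and returning the
# converted value, applied in order. A hypothetical example:
#
#     def to_bool(value, expression, connection):
#         # Map 0/1 integers from the database to Python booleans.
#         return None if value is None else bool(value)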
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ((sql, ...), (params_list, ...)) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
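# Worked example (illustrative only) of the shapes documented above:
# with fields = [f1, f2] and value_rows = [[1, 2], [3, 4]],
# assemble_as_sql() returns
#
#     placeholder_rows = (("%s", "%s"), ("%s", "%s"))
#     param_rows = [[1, 2], [3, 4]]
#
# i.e. one row of placeholders and one flattened params list per object.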
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
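# Illustration of the fallback path above (hypothetical table name):
# when the delete touches more than one alias or references itself in a
# subquery, it's rewritten as a pk__in filter on a fresh outer query:
#
#     DELETE FROM "book" WHERE "book"."id" IN (
#         SELECT "book"."id" FROM "book" INNER JOIN ... WHERE ...
#     )
#
# and on backends without update_can_self_select (e.g. MySQL) the inner
# query is first materialized as SELECT * FROM (...) subquery.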
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
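# Illustration (hypothetical model): Book.objects.aggregate(
#     total=Count("id")) can compile, via this wrapper, to roughly
#
#     SELECT COUNT("book"."id") FROM (
#         SELECT "book"."id" AS "col1" FROM "book"
#     ) subquery
#
# with the annotation selected over the inner query's aliased columns.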
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
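# Usage sketch (assumed, mirroring execute_sql() above): iterate the
# cursor in fixed-size chunks until fetchmany() returns the sentinel.
#
#     rows = cursor_iter(
#         cursor,
#         connection.features.empty_fetchmany_value,
#         col_count=None,  # keep full rows
#         itersize=GET_ITERATOR_CHUNK_SIZE,
#     )
#     for chunk in rows:  # each chunk is a list of rows
#         ...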
|
7c017e795ce6b3b047dcd0730d18afadbaf98aace43fa398b60ffef93dc4560c | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import operator
from functools import reduce
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, When
from django.db.models.lookups import Exact
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
class WhereNode(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with an as_sql() method, a
contains_aggregate attribute, and either a relabeled_clone() method or
both relabel_aliases() and clone() methods.
"""
default = AND
resolved = False
conditional = True
def split_having_qualify(self, negated=False, must_group_by=False):
"""
Return three possibly None nodes: one for those parts of self that
should be included in the WHERE clause, one for those parts of self
that must be included in the HAVING clause, and one for those parts
that refer to window functions.
"""
if not self.contains_aggregate and not self.contains_over_clause:
return self, None, None
in_negated = negated ^ self.negated
# Whether or not children must be connected in the same filtering
# clause (WHERE > HAVING > QUALIFY) to maintain logical semantics.
must_remain_connected = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if (
must_remain_connected
and self.contains_aggregate
and not self.contains_over_clause
):
# It's much cheaper to short-circuit and stash everything in the
# HAVING clause than to split the children, when possible.
return None, self, None
where_parts = []
having_parts = []
qualify_parts = []
for c in self.children:
if hasattr(c, "split_having_qualify"):
where_part, having_part, qualify_part = c.split_having_qualify(
in_negated, must_group_by
)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
if qualify_part is not None:
qualify_parts.append(qualify_part)
elif c.contains_over_clause:
qualify_parts.append(c)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
if must_remain_connected and qualify_parts:
# Disjunctive heterogeneous predicates can be pushed down to
# qualify as long as no conditional aggregation is involved.
if not where_parts or (where_parts and not must_group_by):
return None, None, self
elif where_parts:
# In theory this should only be enforced when dealing with
# where_parts containing predicates against multi-valued
# relationships that could affect aggregation results, but this
# is complex to infer properly.
raise NotImplementedError(
"Heterogeneous disjunctive predicates against window functions are "
"not implemented when performing conditional aggregation."
)
where_node = (
self.create(where_parts, self.connector, self.negated)
if where_parts
else None
)
having_node = (
self.create(having_parts, self.connector, self.negated)
if having_parts
else None
)
qualify_node = (
self.create(qualify_parts, self.connector, self.negated)
if qualify_parts
else None
)
return where_node, having_node, qualify_node
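# Hedged illustration: for an annotated queryset filtered with
#
#     Q(name__startswith="a") & Q(num_books__gt=2)
#
# where num_books is an aggregate annotation, the first part is
# returned for WHERE and the second for HAVING; a part referencing a
# window expression would be routed to the QUALIFY node instead.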
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND (a + b + c + ...) == 1
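# For example (illustrative lookups a and b), Q(a=1) ^ Q(b=2)
# compiles to:
# (a = 1 OR b = 2) AND
# ((CASE WHEN a = 1 THEN 1 ELSE 0 END +
#   CASE WHEN b = 2 THEN 1 ELSE 0 END) = 1)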
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check if this node matches nothing or everything: empty_needed and
# full_needed started as the number of children that would have to
# match nothing/everything for this node to do so, and each compiled
# child decremented the relevant counter above.
if empty_needed == 0:
if self.negated:
return "", []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return "", []
conn = " %s " % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
def get_group_by_cols(self, alias=None):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, "clone"):
child = child.clone()
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
clone.children.append(child.replace_expressions(replacements))
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@property
def is_summary(self):
return any(child.is_summary for child in self.children)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
@property
def _output_field_or_none(self):
return self.output_field
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def leaves(self):
for child in self.children:
if isinstance(child, WhereNode):
yield from child.leaves()
else:
yield child
class NothingNode:
"""A node that matches nothing."""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere:
# The contents are a black box - assume no aggregates or windows are used.
contains_aggregate = False
contains_over_clause = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint:
# Even if aggregates or windows would be used in a subquery,
# the outer query isn't interested in those.
contains_aggregate = False
contains_over_clause = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
query_object.clear_ordering(clear_default=True)
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
|
9d77e0e0e6db8c9843c95da2c0c9a794cc42321943d7e1278a7a3212aff47914 | from django.db import DatabaseError, InterfaceError
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
minimum_database_version = (19,)
# Oracle crashes with "ORA-00932: inconsistent datatypes: expected - got
# BLOB" when grouping by LOBs (#24096).
allows_group_by_lob = False
interprets_empty_strings_as_nulls = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_skip_locked = True
has_select_for_update_of = True
select_for_update_of_column = True
can_return_columns_from_insert = True
supports_subqueries_in_group_by = False
ignores_unnecessary_order_by_in_subqueries = False
supports_transactions = True
supports_timezones = False
has_native_duration_field = True
can_defer_constraint_checks = True
supports_partially_nullable_unique_constraints = False
supports_deferrable_unique_constraints = True
truncates_names = True
supports_tablespaces = True
supports_sequence_reset = False
can_introspect_materialized_views = True
atomic_transactions = False
nulls_order_largest = True
requires_literal_defaults = True
closed_cursor_error_class = InterfaceError
bare_select_suffix = " FROM DUAL"
# Select for update with limit can be achieved on Oracle, but not with the
# current backend.
supports_select_for_update_with_limit = False
supports_temporal_subtraction = True
# Oracle doesn't ignore the case of quoted identifiers, but the current
# backend does, by uppercasing all identifiers.
ignores_table_name_case = True
supports_index_on_text_field = False
create_test_procedure_without_params_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" AS
V_I INTEGER;
BEGIN
V_I := 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS
V_I INTEGER;
BEGIN
V_I := P_I;
END;
"""
create_test_table_with_composite_primary_key = """
CREATE TABLE test_table_composite_pk (
column_1 NUMBER(11) NOT NULL,
column_2 NUMBER(11) NOT NULL,
PRIMARY KEY (column_1, column_2)
)
"""
supports_callproc_kwargs = True
supports_over_clause = True
supports_frame_range_fixed_distance = True
supports_ignore_conflicts = False
max_query_params = 2**16 - 1
supports_partial_indexes = False
can_rename_index = True
supports_slicing_ordering_in_compound = True
allows_multiple_constraints_on_same_fields = False
supports_boolean_expr_in_select_clause = False
supports_comparing_boolean_expr = False
supports_primitives_in_json_field = False
supports_json_field_contains = False
supports_collation_on_textfield = False
test_collations = {
"ci": "BINARY_CI",
"cs": "BINARY",
"non_default": "SWEDISH_CI",
"swedish_ci": "SWEDISH_CI",
}
test_now_utc_template = "CURRENT_TIMESTAMP AT TIME ZONE 'UTC'"
django_test_skips = {
"Oracle doesn't support SHA224.": {
"db_functions.text.test_sha224.SHA224Tests.test_basic",
"db_functions.text.test_sha224.SHA224Tests.test_transform",
},
"Oracle doesn't correctly calculate ISO 8601 week numbering before "
"1583 (the Gregorian calendar was introduced in 1582).": {
"db_functions.datetime.test_extract_trunc.DateFunctionTests."
"test_trunc_week_before_1000",
"db_functions.datetime.test_extract_trunc.DateFunctionWithTimeZoneTests."
"test_trunc_week_before_1000",
},
"Oracle extracts seconds including fractional seconds (#33517).": {
"db_functions.datetime.test_extract_trunc.DateFunctionTests."
"test_extract_second_func_no_fractional",
"db_functions.datetime.test_extract_trunc.DateFunctionWithTimeZoneTests."
"test_extract_second_func_no_fractional",
},
"Oracle doesn't support bitwise XOR.": {
"expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor",
"expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor_null",
"expressions.tests.ExpressionOperatorTests."
"test_lefthand_bitwise_xor_right_null",
},
"Oracle requires ORDER BY in row_number, ANSI:SQL doesn't.": {
"expressions_window.tests.WindowFunctionTests.test_row_number_no_ordering",
},
"Raises ORA-00600: internal error code.": {
"model_fields.test_jsonfield.TestQuerying.test_usage_in_subquery",
},
"Oracle doesn't support changing collations on indexed columns (#33671).": {
"migrations.test_operations.OperationTests."
"test_alter_field_pk_fk_db_collation",
},
}
django_test_expected_failures = {
# A bug in Django/cx_Oracle with respect to string handling (#23843).
"annotations.tests.NonAggregateAnnotationTestCase.test_custom_functions",
"annotations.tests.NonAggregateAnnotationTestCase."
"test_custom_functions_can_ref_other_functions",
}
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"GenericIPAddressField": "CharField",
"PositiveBigIntegerField": "BigIntegerField",
"PositiveIntegerField": "IntegerField",
"PositiveSmallIntegerField": "IntegerField",
"SmallIntegerField": "IntegerField",
"TimeField": "DateTimeField",
}
@cached_property
def supports_collation_on_charfield(self):
with self.connection.cursor() as cursor:
try:
cursor.execute("SELECT CAST('a' AS VARCHAR2(4001)) FROM dual")
except DatabaseError as e:
if e.args[0].code == 910:
return False
raise
return True
|
fa06ea9ac16faf9e69bf521e706c844d908c78a43ac5e9ff9b3ce6c53c4abaa5 | from django.db import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
# An optional tuple indicating the minimum supported database version.
minimum_database_version = None
gis_enabled = False
# Oracle can't group by LOB (large object) data types.
allows_group_by_lob = True
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
# Does the backend support initially deferrable unique constraints?
supports_deferrable_unique_constraints = False
can_use_chunked_reads = True
can_return_columns_from_insert = False
can_return_rows_from_bulk_insert = False
has_bulk_insert = True
uses_savepoints = True
can_release_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
has_select_for_no_key_update = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
# than a table?
select_for_update_of_column = False
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
# Does the backend ignore unnecessary ORDER BY clauses in subqueries?
ignores_unnecessary_order_by_in_subqueries = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support subtraction of temporal data of the
# same type, returning the type used to store a duration field?
supports_temporal_subtraction = False
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
supports_order_by_nulls_modifier = True
# Does the backend order NULLS FIRST by default?
order_by_nulls_first = False
# The database's limit on the number of query parameters.
max_query_params = None
# Can an object have an autoincrement primary key of 0?
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred?
can_defer_constraint_checks = False
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Map fields which some backends may not be able to differentiate to the
# field it's introspected as.
introspected_field_types = {
"AutoField": "AutoField",
"BigAutoField": "BigAutoField",
"BigIntegerField": "BigIntegerField",
"BinaryField": "BinaryField",
"BooleanField": "BooleanField",
"CharField": "CharField",
"DurationField": "DurationField",
"GenericIPAddressField": "GenericIPAddressField",
"IntegerField": "IntegerField",
"PositiveBigIntegerField": "PositiveBigIntegerField",
"PositiveIntegerField": "PositiveIntegerField",
"PositiveSmallIntegerField": "PositiveSmallIntegerField",
"SmallAutoField": "SmallAutoField",
"SmallIntegerField": "SmallIntegerField",
"TimeField": "TimeField",
}
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Does the backend support introspection of materialized views?
can_introspect_materialized_views = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Does it support operations requiring references rename in a transaction?
supports_atomic_references_rename = True
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Can it create foreign key constraints inline when adding columns?
can_create_inline_fk = True
# Can an index be renamed?
can_rename_index = False
# Does it automatically index foreign keys?
indexes_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support introspection of CHECK constraints?
can_introspect_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = False
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ""
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
# Does the backend consider table names with different casing to
# be equal?
ignores_table_name_case = False
# Place FOR UPDATE right after FROM clause. Used on MSSQL.
for_update_after_from = False
# Combinatorial flags
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?
supports_aggregate_filter_clause = False
# Does the backend support indexing a TextField?
supports_index_on_text_field = True
# Does the backend support window expressions (expression OVER (...))?
supports_over_clause = False
supports_frame_range_fixed_distance = False
only_supports_unbounded_with_preceding_and_following = False
# Does the backend support CAST with precision?
supports_cast_with_precision = True
# How many second decimals does the database return when casting a value to
# a type with time?
time_cast_precision = 6
# SQL to create a procedure for use by the Django test suite. The
# functionality of the procedure isn't important.
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# SQL to create a table with a composite primary key for use by the Django
# test suite.
create_test_table_with_composite_primary_key = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does the backend support updating rows on constraint or uniqueness errors
# during INSERT?
supports_update_conflicts = False
supports_update_conflicts_with_target = False
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
# Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
supports_partial_indexes = True
supports_functions_in_partial_indexes = True
# Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)?
supports_covering_indexes = False
# Does the backend support indexes on expressions?
supports_expression_indexes = True
# Does the backend treat COLLATE as an indexed expression?
collate_as_index_expression = False
# Does the database allow more than one constraint or index on the same
# field(s)?
allows_multiple_constraints_on_same_fields = True
# Does the backend support boolean expressions in SELECT and GROUP BY
# clauses?
supports_boolean_expr_in_select_clause = True
# Does the backend support comparing boolean expressions in WHERE clauses?
# E.g.: WHERE (price > 0) IS NOT NULL
supports_comparing_boolean_expr = True
# Does the backend support JSONField?
supports_json_field = True
# Can the backend introspect a JSONField?
can_introspect_json_field = True
# Does the backend support primitives in JSONField?
supports_primitives_in_json_field = True
# Is there a true datatype for JSON?
has_native_json_field = False
# Does the backend use PostgreSQL-style JSON operators like '->'?
has_json_operators = False
# Does the backend support __contains and __contained_by lookups for
# a JSONField?
supports_json_field_contains = True
# Does value__d__contains={'f': 'g'} (without a list around the dict) match
# {'d': [{'f': 'g'}]}?
json_key_contains_list_matching_requires_list = False
# Does the backend support JSONObject() database function?
has_json_object_function = True
# Does the backend support column collations?
supports_collation_on_charfield = True
supports_collation_on_textfield = True
# Does the backend support non-deterministic collations?
supports_non_deterministic_collations = True
# Does the backend support the logical XOR operator?
supports_logical_xor = False
# Collation names for use by the Django test suite.
test_collations = {
"ci": None, # Case-insensitive.
"cs": None, # Case-sensitive.
"non_default": None, # Non-default.
"swedish_ci": None, # Swedish case-insensitive.
}
# SQL template override for tests.aggregation.tests.NowUTC
test_now_utc_template = None
# A set of dotted paths to tests in Django's test suite that are expected
# to fail on this database.
django_test_expected_failures = set()
# A map of reasons to sets of dotted paths to tests in Django's test suite
# that should be skipped for this database.
django_test_skips = {}
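# Illustrative sketch (not part of this file): a third-party backend opts in
# or out by overriding these flags on its own features class, e.g. (all names
# below are assumed):
#
#   class DatabaseFeatures(BaseDatabaseFeatures):
#       supports_json_field = False
#       django_test_skips = {
#           "Backend has no native JSON support.": {
#               "some_app.tests.JSONTests",  # hypothetical dotted test path
#           },
#       }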
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_explaining_query_execution(self):
"""Does this backend support explaining query execution?"""
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute("CREATE TABLE ROLLBACK_TEST (X INT)")
self.connection.set_autocommit(False)
cursor.execute("INSERT INTO ROLLBACK_TEST (X) VALUES (8)")
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute("SELECT COUNT(X) FROM ROLLBACK_TEST")
(count,) = cursor.fetchone()
cursor.execute("DROP TABLE ROLLBACK_TEST")
return count == 0
def allows_group_by_selected_pks_on_model(self, model):
if not self.allows_group_by_selected_pks:
return False
return model._meta.managed
|
e50b522159e7c6a7d8cfad817b046bd206df3bf7fa8060bcbda8633bb8028d4f | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
from django.db.backends.utils import strip_quotes
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# Setting all constraints to IMMEDIATE to allow changing data in the same
# transaction.
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
"; SET CONSTRAINTS ALL IMMEDIATE"
)
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_index_concurrently = (
"CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s"
# Setting the constraint to IMMEDIATE to allow changing data in the same
# transaction.
sql_create_column_inline_fk = (
"CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s"
"; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE"
)
# Setting the constraint to IMMEDIATE runs any deferred checks to allow
# dropping it in the same transaction.
sql_delete_fk = (
"SET CONSTRAINTS %(name)s IMMEDIATE; "
"ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
)
sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)"
sql_add_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD "
"GENERATED BY DEFAULT AS IDENTITY"
)
sql_drop_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS"
)
def quote_value(self, value):
if isinstance(value, str):
value = value.replace("%", "%%")
adapted = psycopg2.extensions.adapt(value)
if hasattr(adapted, "encoding"):
adapted.encoding = "utf8"
# getquoted() returns a quoted bytestring of the adapted value.
return adapted.getquoted().decode()
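# Illustrative behavior (inputs assumed): strings are %-escaped and quoted by
# psycopg2's adapter so the result can be inlined into DDL, e.g.
#   quote_value("it's 100%")  ->  "'it''s 100%%'"
#   quote_value(42)           ->  "42"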
def _field_indexes_sql(self, model, field):
output = super()._field_indexes_sql(model, field)
like_index_statement = self._create_like_index_sql(model, field)
if like_index_statement is not None:
output.append(like_index_statement)
return output
def _field_data_type(self, field):
if field.is_relation:
return field.rel_db_type(self.connection)
return self.connection.data_types.get(
field.get_internal_type(),
field.db_type(self.connection),
)
def _field_base_data_types(self, field):
# Yield base data types for array fields.
if field.base_field.get_internal_type() == "ArrayField":
yield from self._field_base_data_types(field.base_field)
else:
yield self._field_data_type(field.base_field)
def _create_like_index_sql(self, model, field):
"""
Return the statement to create an index with varchar operator pattern
when the column type is 'varchar' or 'text', otherwise return None.
"""
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if "[" in db_type:
return None
# Non-deterministic collations on PostgreSQL don't support indexes
# for operator classes varchar_pattern_ops/text_pattern_ops.
if getattr(field, "db_collation", None):
return None
if db_type.startswith("varchar"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["varchar_pattern_ops"],
)
elif db_type.startswith("text"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["text_pattern_ops"],
)
return None
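# Illustrative output (table/column/index names assumed) for an indexed
# CharField, created in addition to the regular b-tree index:
#   CREATE INDEX "app_model_name_like" ON "app_model"
#       ("name" varchar_pattern_ops)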
def _using_sql(self, new_field, old_field):
using_sql = " USING %(column)s::%(type)s"
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
if new_internal_type == "ArrayField" and new_internal_type == old_internal_type:
# Compare base data types for array fields.
if list(self._field_base_data_types(old_field)) != list(
self._field_base_data_types(new_field)
):
return using_sql
elif self._field_data_type(old_field) != self._field_data_type(new_field):
return using_sql
return ""
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params["type"]
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
self.sql_alter_column_type += using_sql
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
# Make ALTER TYPE with IDENTITY make sense: add or drop the identity
# when changing to or from an auto field.
table = strip_quotes(model._meta.db_table)
auto_field_types = {
"AutoField",
"BigAutoField",
"SmallAutoField",
}
old_is_auto = old_internal_type in auto_field_types
new_is_auto = new_internal_type in auto_field_types
if new_is_auto and not old_is_auto:
column = strip_quotes(new_field.column)
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(column),
"type": new_type,
},
[],
),
[
(
self.sql_add_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(column),
},
[],
),
],
)
elif old_is_auto and not new_is_auto:
# Drop IDENTITY if exists (pre-Django 4.1 serial columns don't have
# it).
self.execute(
self.sql_drop_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(strip_quotes(new_field.column)),
}
)
column = strip_quotes(new_field.column)
sequence_name = "%s_%s_seq" % (table, column)
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
return fragment, [
(
# Drop the sequence if exists (Django 4.1+ identity columns
# don't have it).
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
),
]
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
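# Illustrative DDL (table/column names assumed) when an IntegerField becomes
# an AutoField:
#   ALTER TABLE "app_model" ALTER COLUMN "id" TYPE integer;
#   ALTER TABLE "app_model" ALTER COLUMN "id"
#       ADD GENERATED BY DEFAULT AS IDENTITY
# and, in the reverse direction, DROP IDENTITY IF EXISTS followed by
# DROP SEQUENCE IF EXISTS ... CASCADE.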
def _alter_column_collation_sql(
self, model, new_field, new_type, new_collation, old_field
):
sql = self.sql_alter_column_collate
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
sql += using_sql
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": " " + self._collate_sql(new_collation)
if new_collation
else "",
},
[],
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
super()._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
# Added an index? Create any PostgreSQL-specific indexes.
if (not (old_field.db_index or old_field.unique) and new_field.db_index) or (
not old_field.unique and new_field.unique
):
like_index_statement = self._create_like_index_sql(model, new_field)
if like_index_statement is not None:
self.execute(like_index_statement)
# Removed an index? Drop any PostgreSQL-specific indexes.
if old_field.unique and not (new_field.db_index or new_field.unique):
index_to_remove = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_to_remove))
def _index_columns(self, table, columns, col_suffixes, opclasses):
if opclasses:
return IndexColumns(
table,
columns,
self.quote_name,
col_suffixes=col_suffixes,
opclasses=opclasses,
)
return super()._index_columns(table, columns, col_suffixes, opclasses)
def add_index(self, model, index, concurrently=False):
self.execute(
index.create_sql(model, self, concurrently=concurrently), params=None
)
def remove_index(self, model, index, concurrently=False):
self.execute(index.remove_sql(model, self, concurrently=concurrently))
def _delete_index_sql(self, model, name, sql=None, concurrently=False):
sql = (
self.sql_delete_index_concurrently
if concurrently
else self.sql_delete_index
)
return super()._delete_index_sql(model, name, sql)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
concurrently=False,
include=None,
expressions=None,
):
sql = (
self.sql_create_index
if not concurrently
else self.sql_create_index_concurrently
)
return super()._create_index_sql(
model,
fields=fields,
name=name,
suffix=suffix,
using=using,
db_tablespace=db_tablespace,
col_suffixes=col_suffixes,
sql=sql,
opclasses=opclasses,
condition=condition,
include=include,
expressions=expressions,
)
|
4f3229c1a07a1a8eb429162afbbe425b855dcc7c6ec8fb24ab028a550b08c241 | import sys
import time
from importlib import import_module
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError, no_translations
from django.core.management.sql import emit_post_migrate_signal, emit_pre_migrate_signal
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
from django.utils.text import Truncator
class Command(BaseCommand):
help = (
"Updates database schema. Manages both apps with migrations and those without."
)
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
"--skip-checks",
action="store_true",
help="Skip system checks.",
)
parser.add_argument(
"app_label",
nargs="?",
help="App label of an application to synchronize the state.",
)
parser.add_argument(
"migration_name",
nargs="?",
help="Database state will be brought to the state after that "
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help=(
'Nominates a database to synchronize. Defaults to the "default" '
"database."
),
)
parser.add_argument(
"--fake",
action="store_true",
help="Mark migrations as run without actually running them.",
)
parser.add_argument(
"--fake-initial",
action="store_true",
help=(
"Detect if tables already exist and fake-apply initial migrations if "
"so. Make sure that the current database schema matches your initial "
"migration before using this flag. Django will only check for an "
"existing table name."
),
)
parser.add_argument(
"--plan",
action="store_true",
help="Shows a list of the migration actions that will be performed.",
)
parser.add_argument(
"--run-syncdb",
action="store_true",
help="Creates tables for apps without migrations.",
)
parser.add_argument(
"--check",
action="store_true",
dest="check_unapplied",
help=(
"Exits with a non-zero status if unapplied migrations exist and does "
"not actually apply migrations."
),
)
parser.add_argument(
"--prune",
action="store_true",
dest="prune",
help="Delete nonexistent migrations from the django_migrations table.",
)
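# Illustrative invocations (app and migration names assumed):
#   python manage.py migrate                   # apply all pending migrations
#   python manage.py migrate myapp 0002        # move myapp to a given state
#   python manage.py migrate myapp zero        # unapply all myapp migrations
#   python manage.py migrate --plan --check    # preview; exit 1 if unapplied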
@no_translations
def handle(self, *args, **options):
database = options["database"]
if not options["skip_checks"]:
self.check(databases=[database])
self.verbosity = options["verbosity"]
self.interactive = options["interactive"]
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module(".management", app_config.name)
# Get the database we're operating from
connection = connections[database]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app) for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = options["run_syncdb"]
target_app_labels_only = True
if options["app_label"]:
# Validate app_label.
app_label = options["app_label"]
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if run_syncdb:
if app_label in executor.loader.migrated_apps:
raise CommandError(
"Can't use run_syncdb with app '%s' as it has migrations."
% app_label
)
elif app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations." % app_label)
if options["app_label"] and options["migration_name"]:
migration_name = options["migration_name"]
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(
app_label, migration_name
)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." % (migration_name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'."
% (migration_name, app_label)
)
target = (app_label, migration.name)
# Partially applied squashed migrations are not included in the
# graph, use the last replacement instead.
if (
target not in executor.loader.graph.nodes
and target in executor.loader.replacements
):
incomplete_migration = executor.loader.replacements[target]
target = incomplete_migration.replaces[-1]
targets = [target]
target_app_labels_only = False
elif options["app_label"]:
targets = [
key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label
]
else:
targets = executor.loader.graph.leaf_nodes()
if options["prune"]:
if not options["app_label"]:
raise CommandError(
"Migrations can be pruned only when an app is specified."
)
if self.verbosity > 0:
self.stdout.write("Pruning migrations:", self.style.MIGRATE_HEADING)
to_prune = set(executor.loader.applied_migrations) - set(
executor.loader.disk_migrations
)
squashed_migrations_with_deleted_replaced_migrations = [
migration_key
for migration_key, migration_obj in executor.loader.replacements.items()
if any(replaced in to_prune for replaced in migration_obj.replaces)
]
if squashed_migrations_with_deleted_replaced_migrations:
self.stdout.write(
self.style.NOTICE(
" Cannot use --prune because the following squashed "
"migrations have their 'replaces' attributes and may not "
"be recorded as applied:"
)
)
for migration in squashed_migrations_with_deleted_replaced_migrations:
app, name = migration
self.stdout.write(f" {app}.{name}")
self.stdout.write(
self.style.NOTICE(
" Re-run 'manage.py migrate' if they are not marked as "
"applied, and remove 'replaces' attributes in their "
"Migration classes."
)
)
else:
to_prune = sorted(
migration for migration in to_prune if migration[0] == app_label
)
if to_prune:
for migration in to_prune:
app, name = migration
if self.verbosity > 0:
self.stdout.write(
self.style.MIGRATE_LABEL(f" Pruning {app}.{name}"),
ending="",
)
executor.recorder.record_unapplied(app, name)
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(" OK"))
elif self.verbosity > 0:
self.stdout.write(" No migrations to prune.")
plan = executor.migration_plan(targets)
exit_dry = plan and options["check_unapplied"]
if options["plan"]:
self.stdout.write("Planned operations:", self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(" No planned migration operations.")
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(operation, backwards)
style = self.style.WARNING if is_error else None
self.stdout.write(" " + message, style)
if exit_dry:
sys.exit(1)
return
if exit_dry:
sys.exit(1)
if options["prune"]:
return
# At this point, ignore run_syncdb if there aren't any apps to sync.
run_syncdb = options["run_syncdb"] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
if options["app_label"]:
self.stdout.write(
self.style.MIGRATE_LABEL(
" Synchronize unmigrated app: %s" % app_label
)
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ")
+ (", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ")
+ (", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(
self.style.MIGRATE_LABEL(" Unapply all migrations: ")
+ str(targets[0][0])
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Target specific migration: ")
+ "%s, from %s" % (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity,
self.interactive,
connection.alias,
stdout=self.stdout,
apps=pre_migrate_apps,
plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(
self.style.MIGRATE_HEADING("Synchronizing apps without migrations:")
)
if options["app_label"]:
self.sync_apps(connection, [app_label])
else:
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there are changes that aren't in migrations yet, tell the user
# how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(
self.style.NOTICE(
" Your models in app(s): %s have changes that are not "
"yet reflected in a migration, and so won't be "
"applied." % ", ".join(repr(app) for app in sorted(changes))
)
)
self.stdout.write(
self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
)
)
fake = False
fake_initial = False
else:
fake = options["fake"]
fake_initial = options["fake_initial"]
post_migrate_state = executor.migrate(
targets,
plan=plan,
state=pre_migrate_state.clone(),
fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple(
[ModelState.from_model(apps.get_model(*model)) for model in model_keys]
)
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity,
self.interactive,
connection.alias,
stdout=self.stdout,
apps=post_migrate_apps,
plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(
app_config, connection.alias, include_auto_created=False
),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables)
or (
opts.auto_created
and converter(opts.auto_created._meta.db_table) in tables
)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model"
% (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(
" Creating table %s" % model._meta.db_table
)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...")
@staticmethod
def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ""
is_error = False
if hasattr(operation, "code"):
code = operation.reverse_code if backwards else operation.code
action = (code.__doc__ or "") if code else None
elif hasattr(operation, "sql"):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ""
if backwards:
prefix = "Undo "
if action is not None:
action = str(action).replace("\n", "")
elif backwards:
action = "IRREVERSIBLE"
is_error = True
if action:
action = " -> " + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error
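# Illustrative --plan output lines (operation names assumed):
#   Create model Author                    (forwards CreateModel)
#   Undo Create model Author               (backwards CreateModel)
#   Raw Python operation -> IRREVERSIBLE   (RunPython without reverse_code)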
|
5ab151338b6d726cffe4d0823ff585d77c5de8a13da1eae8c3219d204e2c5c96 | import copy
import json
import re
from functools import partial, update_wrapper
from urllib.parse import quote as urlquote
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks,
InlineModelAdminChecks,
ModelAdminChecks,
)
from django.contrib.admin.decorators import display
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects,
construct_change_message,
flatten_fieldsets,
get_deleted_objects,
lookup_spawns_duplicates,
model_format_dict,
model_ngettext,
quote,
unquote,
)
from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist,
FieldError,
PermissionDenied,
ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet,
inlineformset_factory,
modelform_defines_fields,
modelform_factory,
modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import (
capfirst,
format_lazy,
get_text_list,
smart_split,
unescape_string_literal,
)
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = "_popup"
TO_FIELD_VAR = "_to_field"
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return "radiolist" if radio_style == VERTICAL else "radiolist inline"
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
"form_class": forms.SplitDateTimeField,
"widget": widgets.AdminSplitDateTime,
},
models.DateField: {"widget": widgets.AdminDateWidget},
models.TimeField: {"widget": widgets.AdminTimeWidget},
models.TextField: {"widget": widgets.AdminTextareaWidget},
models.URLField: {"widget": widgets.AdminURLFieldWidget},
models.IntegerField: {"widget": widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {"widget": widgets.AdminBigIntegerFieldWidget},
models.CharField: {"widget": widgets.AdminTextInputWidget},
models.ImageField: {"widget": widgets.AdminFileWidget},
models.FileField: {"widget": widgets.AdminFileWidget},
models.EmailField: {"widget": widgets.AdminEmailInputWidget},
models.UUIDField: {"widget": widgets.AdminUUIDInputWidget},
}
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
autocomplete_fields = ()
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
sortable_by = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
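# Illustrative sketch (admin class and widget kwargs assumed): per-admin
# overrides are merged on top of FORMFIELD_FOR_DBFIELD_DEFAULTS rather than
# replacing it, so e.g.
#
#   class MyModelAdmin(admin.ModelAdmin):
#       formfield_overrides = {
#           models.TextField: {"widget": forms.Textarea(attrs={"rows": 4})},
#       }
#
# changes only the TextField widget and keeps every other default intact.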
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs}
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(
db_field.remote_field.model
)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(
request
),
can_delete_related=related_modeladmin.has_delete_permission(
request
),
can_view_related=related_modeladmin.has_view_permission(
request
),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget,
db_field.remote_field,
self.admin_site,
**wrapper_kwargs,
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs}
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if "widget" not in kwargs:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
if "choices" not in kwargs:
kwargs["choices"] = db_field.get_choices(
include_blank=db_field.blank, blank_choice=[("", _("None"))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset; let the field decide
(return None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(
*ordering
)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get("using")
if "widget" not in kwargs:
if db_field.name in self.get_autocomplete_fields(request):
kwargs["widget"] = AutocompleteSelect(
db_field, self.admin_site, using=db
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ForeignKeyRawIdWidget(
db_field.remote_field, self.admin_site, using=db
)
elif db_field.name in self.radio_fields:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
kwargs["empty_label"] = (
kwargs.get("empty_label", _("None")) if db_field.blank else None
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
return db_field.formfield(**kwargs)
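# Illustrative override (model and field names assumed) limiting the choices
# offered for a ForeignKey:
#
#   def formfield_for_foreignkey(self, db_field, request, **kwargs):
#       if db_field.name == "author":
#           kwargs["queryset"] = Author.objects.filter(is_active=True)
#       return super().formfield_for_foreignkey(db_field, request, **kwargs)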
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto-created, don't show
# a field in the admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get("using")
if "widget" not in kwargs:
autocomplete_fields = self.get_autocomplete_fields(request)
if db_field.name in autocomplete_fields:
kwargs["widget"] = AutocompleteSelectMultiple(
db_field,
self.admin_site,
using=db,
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ManyToManyRawIdWidget(
db_field.remote_field,
self.admin_site,
using=db,
)
elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:
kwargs["widget"] = widgets.FilteredSelectMultiple(
db_field.verbose_name, db_field.name in self.filter_vertical
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
form_field = db_field.formfield(**kwargs)
if (
isinstance(form_field.widget, SelectMultiple)
and form_field.widget.allow_multiple_selected
and not isinstance(
form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple)
)
):
msg = _(
"Hold down “Control”, or “Command” on a Mac, to select more than one."
)
help_text = form_field.help_text
form_field.help_text = (
format_lazy("{} {}", help_text, msg) if help_text else msg
)
return form_field
def get_autocomplete_fields(self, request):
"""
Return a list of ForeignKey and/or ManyToMany fields which should use
an autocomplete widget.
"""
return self.autocomplete_fields
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif hasattr(obj, "get_absolute_url"):
# use the ContentType lookup if view_on_site is True
return reverse(
"admin:view_on_site",
kwargs={
"content_type_id": get_content_type_for_model(obj).pk,
"object_id": obj.pk,
},
current_app=self.admin_site.name,
)
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_exclude(self, request, obj=None):
"""
Hook for specifying exclude.
"""
return self.exclude
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
if self.fields:
return self.fields
# _get_form_for_get_fields() is implemented in subclasses.
form = self._get_form_for_get_fields(request, obj)
return [*form.base_fields, *self.get_readonly_fields(request, obj)]
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {"fields": self.get_fields(request, obj)})]
def get_inlines(self, request, obj):
"""Hook for specifying custom inlines."""
return self.inlines
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Return a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_sortable_by(self, request):
"""Hook for specifying which fields can be sorted in the changelist."""
return (
self.sortable_by
if self.sortable_by is not None
else self.get_list_display(request)
)
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for fk_lookup in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(fk_lookup):
fk_lookup = fk_lookup()
if (lookup, value) in widgets.url_params_from_lookup_dict(
fk_lookup
).items():
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on nonexistent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from the local
# model anyway. For example, if you filter on employee__department__id,
# then the id value would already be found from employee__department_id.
if not prev_field or (
prev_field.is_relation
and field not in prev_field.path_infos[-1].target_fields
):
relation_parts.append(part)
if not getattr(field, "path_infos", None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.path_infos[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
valid_lookups = {self.date_hierarchy}
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(
filter_item, SimpleListFilter
):
valid_lookups.add(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.add(filter_item[0])
else:
valid_lookups.add(filter_item)
# Is it a valid relational lookup?
return not {
LOOKUP_SEP.join(relation_parts),
LOOKUP_SEP.join(relation_parts + [part]),
}.isdisjoint(valid_lookups)
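# Illustrative outcome (model and filter names assumed): with
# list_filter = ("author__email",), lookup_allowed("author__email", "x@y.com")
# returns True, while an arbitrary "author__password" lookup is rejected.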
def to_field_allowed(self, request, to_field):
"""
Return True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
try:
field = self.opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in self.opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f
for f in self.opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (
any(issubclass(model, related_model) for model in registered_models)
and hasattr(remote_field, "get_related_field")
and remote_field.get_related_field() == field
):
return True
return False
def has_add_permission(self, request):
"""
Return True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename("add", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Return True if the given request has permission to change the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("change", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
Return True if the given request has permission to delete the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("delete", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_view_permission(self, request, obj=None):
"""
Return True if the given request has permission to view the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
If overridden by the user in subclasses, it should return True if the
given request has permission to view the `obj` model instance. If `obj`
is None, it should return True if the request has permission to view
any object of the given type.
"""
opts = self.opts
codename_view = get_permission_codename("view", opts)
codename_change = get_permission_codename("change", opts)
return request.user.has_perm(
"%s.%s" % (opts.app_label, codename_view)
) or request.user.has_perm("%s.%s" % (opts.app_label, codename_change))
def has_view_or_change_permission(self, request, obj=None):
return self.has_view_permission(request, obj) or self.has_change_permission(
request, obj
)
def has_module_permission(self, request):
"""
Return True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
class ModelAdmin(BaseModelAdmin):
"""Encapsulate all admin options and functionality for a given model."""
list_display = ("__str__",)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
search_help_text = None
date_hierarchy = None
save_as = False
save_as_continue = True
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = ()
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
popup_response_template = None
# Actions
actions = ()
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super().__init__()
def __str__(self):
return "%s.%s" % (self.opts.app_label, self.__class__.__name__)
def __repr__(self):
return (
f"<{self.__class__.__qualname__}: model={self.model.__qualname__} "
f"site={self.admin_site!r}>"
)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.get_inlines(request, obj):
inline = inline_class(self.model, self.admin_site)
if request:
if not (
inline.has_view_or_change_permission(request, obj)
or inline.has_add_permission(request, obj)
or inline.has_delete_permission(request, obj)
):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.urls import path
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.opts.app_label, self.opts.model_name
return [
path("", wrap(self.changelist_view), name="%s_%s_changelist" % info),
path("add/", wrap(self.add_view), name="%s_%s_add" % info),
path(
"<path:object_id>/history/",
wrap(self.history_view),
name="%s_%s_history" % info,
),
path(
"<path:object_id>/delete/",
wrap(self.delete_view),
name="%s_%s_delete" % info,
),
path(
"<path:object_id>/change/",
wrap(self.change_view),
name="%s_%s_change" % info,
),
# For backwards compatibility (was the change url before 1.9)
path(
"<path:object_id>/",
wrap(
RedirectView.as_view(
pattern_name="%s:%s_%s_change"
% ((self.admin_site.name,) + info)
)
),
),
]
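# Illustrative reversal (app/model names assumed): the named patterns above
# resolve via
#   reverse("admin:myapp_book_changelist")
#   reverse("admin:myapp_book_change", args=[obj.pk])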
@property
def urls(self):
return self.get_urls()
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = [
"vendor/jquery/jquery%s.js" % extra,
"jquery.init.js",
"core.js",
"admin/RelatedObjectLookups.js",
"actions.js",
"urlify.js",
"prepopulate.js",
"vendor/xregexp/xregexp%s.js" % extra,
]
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_model_perms(self, request):
"""
Return a dict of all perms for this model. This dict has the keys
``add``, ``change``, ``delete``, and ``view`` mapping to the True/False
for each of those actions.
"""
return {
"add": self.has_add_permission(request),
"change": self.has_change_permission(request),
"delete": self.has_delete_permission(request),
"view": self.has_view_permission(request),
}
def _get_form_for_get_fields(self, request, obj):
return self.get_form(request, obj, fields=None)
def get_form(self, request, obj=None, change=False, **kwargs):
"""
Return a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
# Exclude all fields if it's a change form and the user doesn't have
# the change permission.
if (
change
and hasattr(request, "user")
and not self.has_change_permission(request, obj)
):
exclude.extend(fields)
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = dict.fromkeys(
f for f in readonly_fields if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError(
"%s. Check fields/fieldsets/exclude attributes of class %s."
% (e, self.__class__.__name__)
)
def get_changelist(self, request, **kwargs):
"""
Return the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_changelist_instance(self, request):
"""
Return a `ChangeList` instance based on `request`. May raise
`IncorrectLookupParameters`.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
# Add the action checkboxes if any actions are available.
if self.get_actions(request):
list_display = ["action_checkbox", *list_display]
sortable_by = self.get_sortable_by(request)
ChangeList = self.get_changelist(request)
return ChangeList(
request,
self.model,
list_display,
list_display_links,
self.get_list_filter(request),
self.date_hierarchy,
self.get_search_fields(request),
self.get_list_select_related(request),
self.list_per_page,
self.list_max_show_all,
self.list_editable,
self,
sortable_by,
self.search_help_text,
)
def get_object(self, request, object_id, from_field=None):
"""
Return an instance matching the field and value provided; the primary
key is used if no field is provided. Return ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = (
model._meta.pk if from_field is None else model._meta.get_field(from_field)
)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Return a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults.get("fields") is None and not modelform_defines_fields(
defaults.get("form")
):
defaults["fields"] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Return a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
return modelformset_factory(
self.model,
self.get_changelist_form(request),
extra=0,
fields=self.list_editable,
**defaults,
)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yield formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(
self, request, queryset, per_page, orphans=0, allow_empty_first_page=True
):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, obj, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import ADDITION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, obj, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import CHANGE, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, obj, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import DELETION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=object_repr,
action_flag=DELETION,
)
@display(description=mark_safe('<input type="checkbox" id="action-toggle">'))
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))
@staticmethod
def _get_action_description(func, name):
return getattr(func, "short_description", capfirst(name.replace("_", " ")))
def _get_base_actions(self):
"""Return the list of actions, prior to any request-based filtering."""
actions = []
base_actions = (self.get_action(action) for action in self.actions or [])
# get_action might have returned None, so filter any of those out.
base_actions = [action for action in base_actions if action]
base_action_names = {name for _, name, _ in base_actions}
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
if name in base_action_names:
continue
description = self._get_action_description(func, name)
actions.append((func, name, description))
# Add actions from this ModelAdmin.
actions.extend(base_actions)
return actions
def _filter_actions_by_permissions(self, request, actions):
"""Filter out any actions that the user doesn't have access to."""
filtered_actions = []
for action in actions:
callable = action[0]
if not hasattr(callable, "allowed_permissions"):
filtered_actions.append(action)
continue
permission_checks = (
getattr(self, "has_%s_permission" % permission)
for permission in callable.allowed_permissions
)
if any(has_permission(request) for has_permission in permission_checks):
filtered_actions.append(action)
return filtered_actions
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is set to None that means actions are disabled on
# this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return {}
actions = self._filter_actions_by_permissions(request, self._get_base_actions())
return {name: (func, name, desc) for func, name, desc in actions}
def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).values():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
description = self._get_action_description(func, action)
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if (
self.list_display_links
or self.list_display_links is None
or not list_display
):
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
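# Sketch of the branches above (FooAdmin is hypothetical): a non-empty
# sequence links those columns, None disables links entirely, and the
# default empty value falls through to linking the first list_display
# column.
#
#     class FooAdmin(admin.ModelAdmin):
#         list_display = ["name", "created"]
#         list_display_links = None  # no change-form links at all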
def get_list_filter(self, request):
"""
Return a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Return a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Return a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith("^"):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith("="):
return "%s__iexact" % field_name[1:]
elif field_name.startswith("@"):
return "%s__search" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == "pk":
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, "path_infos"):
# Update opts to follow the relation.
opts = field.path_infos[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
may_have_duplicates = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [
construct_search(str(search_field)) for search_field in search_fields
]
term_queries = []
for bit in smart_split(search_term):
if bit.startswith(('"', "'")) and bit[0] == bit[-1]:
bit = unescape_string_literal(bit)
or_queries = models.Q.create(
[(orm_lookup, bit) for orm_lookup in orm_lookups],
connector=models.Q.OR,
)
term_queries.append(or_queries)
queryset = queryset.filter(models.Q.create(term_queries))
may_have_duplicates |= any(
lookup_spawns_duplicates(self.opts, search_spec)
for search_spec in orm_lookups
)
return queryset, may_have_duplicates
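# A minimal sketch of the field-name prefixes handled by
# construct_search() above (BookAdmin and the field names are
# hypothetical); "@" maps to the __search lookup and requires a
# backend with full-text search support:
#
#     class BookAdmin(admin.ModelAdmin):
#         search_fields = [
#             "^name",         # name__istartswith
#             "=isbn",         # isbn__iexact
#             "author__name",  # author__name__icontains (default)
#         ]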
def get_preserved_filters(self, request):
"""
Return the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
current_url = "%s:%s" % (match.app_name, match.url_name)
changelist_url = "admin:%s_%s_changelist" % (
self.opts.app_label,
self.opts.model_name,
)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get("_changelist_filters")
if preserved_filters:
return urlencode({"_changelist_filters": preserved_filters})
return ""
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a JSON structure describing changes from a changed object.
"""
return construct_change_message(form, formsets, add)
def message_user(
self, request, message, level=messages.INFO, extra_tags="", fail_silently=False
):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ", ".join("`%s`" % level for level in levels)
raise ValueError(
"Bad message level string: `%s`. Possible values are: %s"
% (level, levels_repr)
)
messages.add_message(
request, level, message, extra_tags=extra_tags, fail_silently=fail_silently
)
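# Since message_user() resolves string levels against
# messages.constants above, the two calls below are equivalent
# (request is assumed to be in scope):
#
#     self.message_user(request, "Queued for export.", level="WARNING")
#     self.message_user(request, "Queued for export.", messages.WARNING)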
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance, save it to the database.
"""
obj.save()
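# A common override of save_model(), sketched with a hypothetical
# modified_by field: stamp the requesting user, then delegate to the
# default save.
#
#     def save_model(self, request, obj, form, change):
#         obj.modified_by = request.user  # hypothetical audit field
#         super().save_model(request, obj, form, change)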
def delete_model(self, request, obj):
"""
Given a model instance, delete it from the database.
"""
obj.delete()
def delete_queryset(self, request, queryset):
"""Given a queryset, delete it from the database."""
queryset.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset, save it to the database.
"""
formset.save()
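# A minimal sketch of overriding save_formset() to post-process inline
# objects (last_edited_by is a hypothetical field); commit=False defers
# the writes so each instance can be adjusted first:
#
#     def save_formset(self, request, form, formset, change):
#         instances = formset.save(commit=False)
#         for removed in formset.deleted_objects:
#             removed.delete()
#         for instance in instances:
#             instance.last_edited_by = request.user
#             instance.save()
#         formset.save_m2m()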
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(
self, request, context, add=False, change=False, form_url="", obj=None
):
app_label = self.opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, form_url
)
view_on_site_url = self.get_view_on_site_url(obj)
has_editable_inline_admin_formsets = False
for inline in context["inline_admin_formsets"]:
if (
inline.has_add_permission
or inline.has_change_permission
or inline.has_delete_permission
):
has_editable_inline_admin_formsets = True
break
context.update(
{
"add": add,
"change": change,
"has_view_permission": self.has_view_permission(request, obj),
"has_add_permission": self.has_add_permission(request),
"has_change_permission": self.has_change_permission(request, obj),
"has_delete_permission": self.has_delete_permission(request, obj),
"has_editable_inline_admin_formsets": (
has_editable_inline_admin_formsets
),
"has_file_field": context["adminform"].form.is_multipart()
or any(
admin_formset.formset.is_multipart()
for admin_formset in context["inline_admin_formsets"]
),
"has_absolute_url": view_on_site_url is not None,
"absolute_url": view_on_site_url,
"form_url": form_url,
"opts": self.opts,
"content_type_id": get_content_type_for_model(self.model).pk,
"save_as": self.save_as,
"save_on_top": self.save_on_top,
"to_field_var": TO_FIELD_VAR,
"is_popup_var": IS_POPUP_VAR,
"app_label": app_label,
}
)
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(
request,
form_template
or [
"admin/%s/%s/change_form.html" % (app_label, self.opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html",
],
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage.
"""
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
"name": opts.verbose_name,
"obj": obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"value": str(value),
"obj": str(obj),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST
and self.save_as_continue
and self.has_change_permission(request, obj)
):
msg = _("The {name} “{obj}” was added successfully.")
if self.has_change_permission(request, obj):
msg += " " + _("You may edit it again below.")
self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts},
post_url_continue,
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was added successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determine the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else opts.pk.attname
value = request.resolver_match.kwargs["object_id"]
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"action": "change",
"value": str(value),
"obj": str(obj),
"new_value": str(new_value),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
opts = self.opts
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
"name": opts.verbose_name,
"obj": format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may edit it "
"again below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may edit it again "
"below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(obj.pk,),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_add" % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was changed successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def _response_post_save(self, request, obj):
if self.has_view_or_change_permission(request):
post_url = reverse(
"admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
return self._response_post_save(request, obj)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
return self._response_post_save(request, obj)
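# Sketch of customizing the post-save redirect (the URL name is
# hypothetical); both hooks funnel into _response_post_save() by
# default, so overriding one leaves the other behavior intact:
#
#     def response_post_save_change(self, request, obj):
#         return HttpResponseRedirect(
#             reverse("admin:myapp_report_changelist")
#         )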
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get("index", 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({"action": data.getlist("action")[action_index]})
except IndexError:
# If we didn't get an action from the chosen form, the POST data is
# invalid, and the missing "action" value will fail the validation
# check below, so there's no need to do anything here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data["action"]
select_across = action_form.cleaned_data["select_across"]
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determine the HttpResponse for the delete_view stage.
"""
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps(
{
"action": "delete",
"value": str(obj_id),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (self.opts.app_label, self.opts.model_name),
"admin/%s/popup_response.html" % self.opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
self.message_user(
request,
_("The %(name)s “%(obj)s” was deleted successfully.")
% {
"name": self.opts.verbose_name,
"obj": obj_display,
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
"admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": self.opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
app_label = self.opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
media=self.media,
)
return TemplateResponse(
request,
self.delete_confirmation_template
or [
"admin/{}/{}/delete_confirmation.html".format(
app_label, self.opts.model_name
),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html",
],
context,
)
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
# Edit permissions on parent model are required for editable inlines.
can_edit_parent = (
self.has_change_permission(request, obj)
if obj
else self.has_add_permission(request)
)
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
if can_edit_parent:
has_add_permission = inline.has_add_permission(request, obj)
has_change_permission = inline.has_change_permission(request, obj)
has_delete_permission = inline.has_delete_permission(request, obj)
else:
# Disable all edit-permissions, and override formset settings.
has_add_permission = (
has_change_permission
) = has_delete_permission = False
formset.extra = formset.max_num = 0
has_view_permission = inline.has_view_permission(request, obj)
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(
inline,
formset,
fieldsets,
prepopulated,
readonly,
model_admin=self,
has_add_permission=has_add_permission,
has_change_permission=has_change_permission,
has_delete_permission=has_delete_permission,
has_view_permission=has_view_permission,
)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data from the request's GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.opts.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
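# Given the parsing above, a GET query string can prefill the add form.
# With hypothetical fields, /admin/library/book/add/?name=Django&authors=1,2
# yields {"name": "Django", "authors": ["1", "2"]} -- the M2M value is
# split on commas into a list of PKs.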
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
"""
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
"""
msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
if request.method == "POST" and "_saveasnew" in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if request.method == "POST":
if not self.has_change_permission(request, obj):
raise PermissionDenied
else:
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(
request, self.opts, object_id
)
fieldsets = self.get_fieldsets(request, obj)
ModelForm = self.get_form(
request, obj, change=not add, fields=flatten_fieldsets(fieldsets)
)
if request.method == "POST":
form = ModelForm(request.POST, request.FILES, instance=obj)
formsets, inline_instances = self._create_formsets(
request,
form.instance,
change=not add,
)
form_validated = form.is_valid()
if form_validated:
new_object = self.save_form(request, form, change=not add)
else:
new_object = form.instance
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(
request, form, formsets, add
)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(
request, form.instance, change=False
)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(
request, obj, change=True
)
if not add and not self.has_change_permission(request, obj):
readonly_fields = flatten_fieldsets(fieldsets)
else:
readonly_fields = self.get_readonly_fields(request, obj)
admin_form = helpers.AdminForm(
form,
list(fieldsets),
# Clear prepopulated fields on a view-only form to avoid a crash.
self.get_prepopulated_fields(request, obj)
if add or self.has_change_permission(request, obj)
else {},
readonly_fields,
model_admin=self,
)
media = self.media + admin_form.media
inline_formsets = self.get_inline_formsets(
request, formsets, inline_instances, obj
)
for inline_formset in inline_formsets:
media = media + inline_formset.media
if add:
title = _("Add %s")
elif self.has_change_permission(request, obj):
title = _("Change %s")
else:
title = _("View %s")
context = {
**self.admin_site.each_context(request),
"title": title % self.opts.verbose_name,
"subtitle": str(obj) if obj else None,
"adminform": admin_form,
"object_id": object_id,
"original": obj,
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
"media": media,
"inline_admin_formsets": inline_formsets,
"errors": helpers.AdminErrorList(form, formsets),
"preserved_filters": self.get_preserved_filters(request),
}
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if (
request.method == "POST"
and not form_validated
and "_saveasnew" in request.POST
):
context["show_save"] = False
context["show_save_and_continue"] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(
request, context, add=add, change=not add, obj=obj, form_url=form_url
)
def add_view(self, request, form_url="", extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url="", extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
def _get_edited_object_pks(self, request, prefix):
"""Return POST data values of list_editable primary keys."""
pk_pattern = re.compile(
r"{}-\d+-{}$".format(re.escape(prefix), self.opts.pk.name)
)
return [value for key, value in request.POST.items() if pk_pattern.match(key)]
def _get_list_editable_queryset(self, request, prefix):
"""
Based on POST data, return a queryset of the objects that were edited
via list_editable.
"""
object_pks = self._get_edited_object_pks(request, prefix)
queryset = self.get_queryset(request)
validate = queryset.model._meta.pk.to_python
try:
for pk in object_pks:
validate(pk)
except ValidationError:
# Disable the optimization if the POST data was tampered with.
return queryset
return queryset.filter(pk__in=object_pks)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
app_label = self.opts.app_label
if not self.has_view_or_change_permission(request):
raise PermissionDenied
try:
cl = self.get_changelist_instance(request)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET:
return SimpleTemplateResponse(
"admin/invalid_setup.html",
{
"title": _("Database error"),
},
)
return HttpResponseRedirect(request.path + "?" + ERROR_FLAG + "=1")
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
actions = self.get_actions(request)
# Actions with no confirmation
if (
actions
and request.method == "POST"
and "index" in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
else:
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (
actions
and request.method == "POST"
and helpers.ACTION_CHECKBOX_NAME in request.POST
and "index" not in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
if action_failed:
# Redirect back to the changelist page to avoid resubmitting the
# form if the user refreshes the browser or uses the "No, take
# me back" button on the action confirmation page.
return HttpResponseRedirect(request.get_full_path())
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and cl.list_editable and "_save" in request.POST:
if not self.has_change_permission(request):
raise PermissionDenied
FormSet = self.get_changelist_formset(request)
modified_objects = self._get_list_editable_queryset(
request, FormSet.get_default_prefix()
)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=modified_objects
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount,
) % {
"count": changecount,
"name": model_ngettext(self.opts, changecount),
}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable and self.has_change_permission(request):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
media += action_form.media
else:
action_form = None
selection_note_all = ngettext(
"%(total_count)s selected", "All %(total_count)s selected", cl.result_count
)
context = {
**self.admin_site.each_context(request),
"module_name": str(self.opts.verbose_name_plural),
"selection_note": _("0 of %(cnt)s selected") % {"cnt": len(cl.result_list)},
"selection_note_all": selection_note_all % {"total_count": cl.result_count},
"title": cl.title,
"subtitle": None,
"is_popup": cl.is_popup,
"to_field": cl.to_field,
"cl": cl,
"media": media,
"has_add_permission": self.has_add_permission(request),
"opts": cl.opts,
"action_form": action_form,
"actions_on_top": self.actions_on_top,
"actions_on_bottom": self.actions_on_bottom,
"actions_selection_counter": self.actions_selection_counter,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_list_template
or [
"admin/%s/%s/change_list.html" % (app_label, self.opts.model_name),
"admin/%s/change_list.html" % app_label,
"admin/change_list.html",
],
context,
)
def get_deleted_objects(self, objs, request):
"""
Hook for customizing the delete process for the delete view and the
"delete selected" action.
"""
return get_deleted_objects(objs, request, self.admin_site)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
"The 'delete' admin view for this model."
app_label = self.opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, self.opts, object_id)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(
deleted_objects,
model_count,
perms_needed,
protected,
) = self.get_deleted_objects([obj], request)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = str(obj)
attr = str(to_field) if to_field else self.opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = str(self.opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
**self.admin_site.each_context(request),
"title": title,
"subtitle": None,
"object_name": object_name,
"object": obj,
"deleted_objects": deleted_objects,
"model_count": dict(model_count).items(),
"perms_lacking": perms_needed,
"protected": protected,
"opts": self.opts,
"app_label": app_label,
"preserved_filters": self.get_preserved_filters(request),
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
**(extra_context or {}),
}
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
from django.contrib.admin.views.main import PAGE_VAR
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
return self._get_obj_does_not_exist_redirect(
request, model._meta, object_id
)
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
app_label = self.opts.app_label
action_list = (
LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model),
)
.select_related()
.order_by("action_time")
)
paginator = self.get_paginator(request, action_list, 100)
page_number = request.GET.get(PAGE_VAR, 1)
page_obj = paginator.get_page(page_number)
page_range = paginator.get_elided_page_range(page_obj.number)
context = {
**self.admin_site.each_context(request),
"title": _("Change history: %s") % obj,
"subtitle": None,
"action_list": page_obj,
"page_range": page_range,
"page_var": PAGE_VAR,
"pagination_required": paginator.count > 100,
"module_name": str(capfirst(self.opts.verbose_name_plural)),
"object": obj,
"opts": self.opts,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.object_history_template
or [
"admin/%s/%s/object_history.html" % (app_label, self.opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html",
],
context,
)
def get_formset_kwargs(self, request, obj, inline, prefix):
formset_params = {
"instance": obj,
"prefix": prefix,
"queryset": inline.get_queryset(request),
}
if request.method == "POST":
formset_params.update(
{
"data": request.POST.copy(),
"files": request.FILES,
"save_as_new": "_saveasnew" in request.POST,
}
)
return formset_params
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = self.get_formset_kwargs(request, obj, inline, prefix)
formset = FormSet(**formset_params)
def user_deleted_form(request, obj, formset, index, inline):
"""Return whether or not the user deleted the form."""
return (
inline.has_delete_permission(request, obj)
and "{}-{}-DELETE".format(formset.prefix, index) in request.POST
)
# Bypass validation of each view-only inline form (since the form's
# data won't be in request.POST), unless the form was deleted.
if not inline.has_change_permission(request, obj if change else None):
for index, form in enumerate(formset.initial_forms):
if user_deleted_form(request, obj, formset, index, inline):
continue
form._errors = {}
form.cleaned_data = form.initial
formsets.append(formset)
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name_plural is None:
if self.verbose_name is None:
self.verbose_name_plural = self.opts.verbose_name_plural
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
if self.verbose_name is None:
self.verbose_name = self.opts.verbose_name
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"]
if self.filter_vertical or self.filter_horizontal:
js.extend(["SelectBox.js", "SelectFilter2.js"])
if self.classes and "collapse" in self.classes:
js.append("collapse.js")
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
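# A minimal sketch of the get_extra() hook (BookInline and its model
# are hypothetical): show blank extra forms only when adding a new
# parent object.
#
#     class BookInline(admin.TabularInline):
#         model = Book
#
#         def get_extra(self, request, obj=None, **kwargs):
#             return 0 if obj else 3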
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in admin add/change views."""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
**kwargs,
}
base_model_form = defaults["form"]
can_change = self.has_change_permission(request, obj) if request else True
can_add = self.has_add_permission(request, obj) if request else True
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance._state.adding:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance
# representation, suitable to be an item in a
# list.
_("%(class_name)s %(instance)s")
% {"class_name": p._meta.verbose_name, "instance": p}
)
params = {
"class_name": self._meta.model._meta.verbose_name,
"instance": self.instance,
"related_objects": get_text_list(objs, _("and")),
}
msg = _(
"Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s"
)
raise ValidationError(
msg, code="deleting_protected", params=params
)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
def has_changed(self):
# Protect against unauthorized edits.
if not can_change and not self.instance._state.adding:
return False
if not can_add and self.instance._state.adding:
return False
return super().has_changed()
defaults["form"] = DeleteProtectedModelForm
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def _get_form_for_get_fields(self, request, obj=None):
return self.get_formset(request, obj, fields=None).form
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_view_or_change_permission(request):
queryset = queryset.none()
return queryset
def _has_any_perms_for_target_model(self, request, perms):
"""
This method is called only when the ModelAdmin's model is for a
ManyToManyField's implicit through model (if self.opts.auto_created).
Return True if the user has any of the given permissions ('add',
'change', etc.) for the model that points to the through model.
"""
opts = self.opts
# Find the target model of an auto-created many-to-many relationship.
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
return any(
request.user.has_perm(
"%s.%s" % (opts.app_label, get_permission_codename(perm, opts))
)
for perm in perms
)
def has_add_permission(self, request, obj):
if self.opts.auto_created:
# Auto-created intermediate models don't have their own
# permissions. The user needs to have the change permission for the
# related model in order to be able to do anything with the
# intermediate model.
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_change_permission(request)
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_delete_permission(request, obj)
def has_view_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission(). The 'change' permission
# also implies the 'view' permission.
return self._has_any_perms_for_target_model(request, ["view", "change"])
return super().has_view_permission(request)
class StackedInline(InlineModelAdmin):
template = "admin/edit_inline/stacked.html"
class TabularInline(InlineModelAdmin):
template = "admin/edit_inline/tabular.html"
|
5d3fdefb8b344e0d9f9a04b42a351c7a7c5eb1d71e8a1142b47867e98da444da
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Aggregate,
Avg,
Case,
Count,
DecimalField,
F,
IntegerField,
Max,
Q,
StdDev,
Sum,
Value,
Variance,
When,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
Alfa,
Author,
Book,
Bravo,
Charlie,
Clues,
Entries,
HardbackBook,
ItemTag,
Publisher,
SelfRefFK,
Store,
WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.p2 = Publisher.objects.create(name="Sams", num_awards=1)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = HardbackBook.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
weight=4.5,
)
cls.b6 = HardbackBook.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
weight=3.7,
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.items():
self.assertEqual(getattr(obj, attr), value)
def test_annotation_with_value(self):
values = (
Book.objects.filter(
name="Practical Django Projects",
)
.annotate(
discount_price=F("price") * 2,
)
.values(
"discount_price",
)
.annotate(sum_discount=Sum("discount_price"))
)
self.assertSequenceEqual(
values,
[{"discount_price": Decimal("59.38"), "sum_discount": Decimal("59.38")}],
)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
The subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(
select={
"name_of_shortest_book": shortest_book_sql,
}
).annotate(total_books=Count("book"))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)},
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={"price_per_page": "price / pages"}).aggregate(
Sum("pages")
),
{"pages__sum": 3703},
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Order of the annotate/extra in the query doesn't matter
obj = (
Book.objects.extra(select={"manufacture_cost": "price * .5"})
.annotate(mean_auth_age=Avg("authors__age"))
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Values queries can be combined with annotate and extra
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.values()
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = (
Book.objects.values()
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name", "mean_auth_age")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.values("name")
.filter(n_authors__gt=2)
)
self.assertSequenceEqual(
qs,
[{"name": "Python Web Development with Django"}],
)
# The annotations are added to values output if values() precedes
# annotate()
obj = (
Book.objects.values("name")
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
# All of the objects are counted (nulls are allowed) and values()
# respects the number of objects
self.assertEqual(len(Author.objects.annotate(Avg("friends__age")).values()), 9)
# Consecutive calls to annotate accumulate in the query
qs = (
Book.objects.values("price")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "price")
.annotate(Max("publisher__num_awards"))
)
self.assertSequenceEqual(
qs,
[
{"price": Decimal("30"), "oldest": 35, "publisher__num_awards__max": 3},
{
"price": Decimal("29.69"),
"oldest": 37,
"publisher__num_awards__max": 7,
},
{
"price": Decimal("23.09"),
"oldest": 45,
"publisher__num_awards__max": 1,
},
{"price": Decimal("75"), "oldest": 57, "publisher__num_awards__max": 9},
{
"price": Decimal("82.8"),
"oldest": 57,
"publisher__num_awards__max": 7,
},
],
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("pages"), Max("price"), Sum("num_authors"), Avg("num_authors")
)
self.assertEqual(
vals,
{
"num_authors__sum": 10,
"num_authors__avg": Approximate(1.666, places=2),
"pages__max": 1132,
"price__max": Decimal("82.80"),
},
)
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")),
{"c__max": 3},
)
def test_conditional_aggregate(self):
# Conditional aggregation of a grouped queryset.
self.assertEqual(
Book.objects.annotate(c=Count("authors"))
.values("pk")
.aggregate(test=Sum(Case(When(c__gt=1, then=1))))["test"],
3,
)
def test_sliced_conditional_aggregate(self):
self.assertEqual(
Author.objects.order_by("pk")[:5].aggregate(
test=Sum(Case(When(age__lte=35, then=1)))
)["test"],
3,
)
def test_annotated_conditional_aggregate(self):
annotated_qs = Book.objects.annotate(
discount_price=F("price") * Decimal("0.75")
)
self.assertAlmostEqual(
annotated_qs.aggregate(
test=Avg(
Case(
When(pages__lt=400, then="discount_price"),
output_field=DecimalField(),
)
)
)["test"],
Decimal("22.27"),
places=2,
)
def test_distinct_conditional_aggregate(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(price=Decimal("29.69"), then="pages"),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_conditional_aggregate_on_complex_condition(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(
Q(price__gte=Decimal("29")) & Q(price__lt=Decimal("30")),
then="pages",
),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_q_annotation_aggregate(self):
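        # A Q object used as an annotation is evaluated as a boolean
        # expression; every book has a pk, so all six rows are counted.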
self.assertEqual(Book.objects.annotate(has_pk=Q(pk__isnull=False)).count(), 6)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__gt=Decimal(40)
)
),
1,
)
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__lte=Decimal(40)
)
),
4,
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, pages, price, "
"pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.aggregate(num_authors=Count("foo"))
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("foo"))
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, num_authors, "
"pages, price, pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("foo")
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(Book.objects.annotate(num_authors=Count("authors")).count(), 6)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count("authors")).aggregate(
Max("num_authors")
)
self.assertEqual(vals, {"num_authors__max": 3})
vals = Publisher.objects.annotate(avg_price=Avg("book__price")).aggregate(
Max("avg_price")
)
self.assertEqual(vals, {"avg_price__max": 75.0})
        # Aliases are quoted to protect aliases that might be reserved names.
vals = Book.objects.aggregate(number=Max("pages"), select=Max("pages"))
self.assertEqual(vals, {"number": 1132, "select": 1132})
# Regression for #10064: select_related() plays nice with aggregates
obj = (
Book.objects.select_related("publisher")
.annotate(num_authors=Count("authors"))
.values()
.get(isbn="013790395")
)
self.assertEqual(
obj,
{
"contact_id": self.a8.id,
"id": self.b5.id,
"isbn": "013790395",
"name": "Artificial Intelligence: A Modern Approach",
"num_authors": 2,
"pages": 1132,
"price": Decimal("82.8"),
"pubdate": datetime.date(1995, 1, 15),
"publisher_id": self.p3.id,
"rating": 4.0,
},
)
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(len(Book.objects.annotate(num_authors=Count("authors"))), 6)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).filter(
num_authors__gt=2
)
),
1,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).exclude(
num_authors__gt=2
)
),
5,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors__lt=3)
.exclude(num_authors__lt=2)
),
2,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.exclude(num_authors__lt=2)
.filter(num_authors__lt=3)
),
2,
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_awards__gt=2 * F("num_books"))
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
            .exclude(num_awards__gt=2 * F("num_books"))
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values("EntryID__Entry").annotate(
Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True)
)
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count("clues__ID"))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
        # Aggregates mixed up the ordering of columns for the backend's
        # convert_values() method. Refs #21126.
e = Entries.objects.create(Entry="foo")
c = Clues.objects.create(EntryID=e, Clue="bar")
qs = Clues.objects.select_related("EntryID").annotate(Count("ID"))
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(Book.objects.filter(id__in=[]).count(), 0)
vals = Book.objects.filter(id__in=[]).aggregate(
num_authors=Count("authors"),
avg_authors=Avg("authors"),
max_authors=Max("authors"),
max_price=Max("price"),
max_rating=Max("rating"),
)
self.assertEqual(
vals,
{
"max_authors": None,
"max_rating": None,
"num_authors": 0,
"avg_authors": None,
"max_price": None,
},
)
qs = (
Publisher.objects.filter(name="Jonno's House of Books")
.annotate(
num_authors=Count("book__authors"),
avg_authors=Avg("book__authors"),
max_authors=Max("book__authors"),
max_price=Max("book__price"),
max_rating=Max("book__rating"),
)
.values()
)
self.assertSequenceEqual(
qs,
[
{
"max_authors": None,
"name": "Jonno's House of Books",
"num_awards": 0,
"max_price": None,
"num_authors": 0,
"max_rating": None,
"id": self.p5.id,
"avg_authors": None,
}
],
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count("authors")).order_by(
"publisher__name", "name"
),
[
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name,
)
# Regression for #10127 - Empty select_related() works with annotate
qs = (
Book.objects.filter(rating__lt=4.5)
.select_related()
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
(
"Artificial Intelligence: A Modern Approach",
51.5,
"Prentice Hall",
"Peter Norvig",
),
("Practical Django Projects", 29.0, "Apress", "James Bennett"),
(
"Python Web Development with Django",
Approximate(30.333, places=2),
"Prentice Hall",
"Jeffrey Forcier",
),
("Sams Teach Yourself Django in 24 Hours", 45.0, "Sams", "Brad Dayley"),
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name),
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = (
Book.objects.extra(select={"pub": "publisher_id"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
qs = (
Book.objects.extra(select={"pub": "publisher_id", "foo": "pages"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
        # Regression for #10182 - Queries with aggregate calls are correctly
        # re-aliased when used in a subquery
ids = (
Book.objects.filter(pages__gt=100)
.annotate(n_authors=Count("authors"))
.filter(n_authors__gt=2)
.order_by("n_authors")
)
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids),
[
"Python Web Development with Django",
],
lambda b: b.name,
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(
Book.objects.values("publisher")
.annotate(max_pages=Max("pages"))
.order_by()
.query
)
# There is just one GROUP BY clause (zero commas means at most one clause).
self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
msg = (
"The named annotation 'authors__age__avg' conflicts with "
"the default name for another annotation."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(
Avg("authors__age"), authors__age__avg=Avg("authors__age")
)
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a field name on the model raises ValueError
msg = "The annotation 'age' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(age=Avg("friends__age"))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with an m2m name on the model raises ValueError
msg = "The annotation 'friends' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(friends=Count("friends"))
def test_fk_attname_conflict(self):
msg = "The annotation 'contact_id' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(contact_id=F("publisher_id"))
def test_values_queryset_non_conflict(self):
# If you're using a values query set, some potential conflicts are
# avoided.
        # age is a field on Author, so normally it couldn't be used as an
        # annotation name. But age isn't included in values(), so it's
        # allowed here.
results = (
Author.objects.values("name")
.annotate(age=Count("book_contact_set"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 1)
# Same problem, but aggregating over m2m fields
results = (
Author.objects.values("name")
.annotate(age=Avg("friends__age"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 32.0)
# Same problem, but colliding with an m2m field
results = (
Author.objects.values("name")
.annotate(friends=Count("friends"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["friends"], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a reverse-related name on the model raises ValueError
msg = "The annotation 'book_contact_set' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(book_contact_set=Avg("friends__age"))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count("authors"))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(),
[
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Practical Django Projects",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
# Regression for #10248 - Annotations work with dates()
qs = (
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors=2)
.dates("pubdate", "day")
)
self.assertSequenceEqual(
qs,
[
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"sheets": "(pages + %s) / %s"}, select_params=[1, 2])
.order_by("sheets")
.values("sheets")
)
self.assertQuerysetEqual(
qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"])
)
        # Regression for #10425 - annotations don't get in the way of a
        # count() clause
self.assertEqual(
Book.objects.values("publisher").annotate(Count("publisher")).count(), 4
)
self.assertEqual(
Book.objects.annotate(Count("publisher")).values("publisher").count(), 6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id])
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(sorted_publishers[0].n_books, 2)
self.assertEqual(sorted_publishers[1].n_books, 1)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books,
[
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
        # Regression for #10666 - inherited fields work with annotations and
        # aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("book_ptr__pages")),
{"n_pages": 2078},
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("pages")),
{"n_pages": 2078},
)
qs = (
HardbackBook.objects.annotate(
n_authors=Count("book_ptr__authors"),
)
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
qs = (
HardbackBook.objects.annotate(n_authors=Count("authors"))
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
        # Regression for #10766 - Shouldn't be able to reference an aggregated
        # field in an aggregate() call.
msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(mean_age=Avg("authors__age")).annotate(
Avg("mean_age")
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[])
.annotate(Count("friends"))
.aggregate(Count("pk")),
{"pk__count": 0},
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg("age")), {"age__avg": None}
)
def test_annotate_and_join(self):
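        # No author has a friend named "Joe", so the exclude() removes
        # nothing even though it reuses the relation being aggregated.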
self.assertEqual(
Author.objects.annotate(c=Count("friends__name"))
.exclude(friends__name="Joe")
.count(),
Author.objects.count(),
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.filter(pages__lt=F("n_authors") * 200)
.values_list("pk")
)
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs),
["Python Web Development with Django"],
attrgetter("name"),
)
def test_values_annotate_values(self):
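        # Book names are unique in the fixture, so grouping by name still
        # yields one row per book.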
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.values_list("pk", flat=True)
.order_by("name")
)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # A field that occurs on the LHS of a HAVING clause appears
        # correctly in the GROUP BY clause.
qs = (
Book.objects.values_list("name")
.annotate(n_authors=Count("authors"))
.filter(pages__gt=F("n_authors"))
.values_list("name", flat=True)
.order_by("name")
)
        # Results should be the same; all Books have more pages than authors.
self.assertEqual(list(qs), list(Book.objects.values_list("name", flat=True)))
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = (
Book.objects.values_list("publisher__name")
.annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
)
.order_by("-publisher__name")
)
self.assertEqual(books[0], ("Sams", 1, Decimal("23.09"), 45.0, 528.0))
def test_annotation_disjunction(self):
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.filter(Q(n_authors=2) | Q(name="Python Web Development with Django"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right")
| (
Q(name="Artificial Intelligence: A Modern Approach")
& Q(n_authors=3)
)
)
).order_by("name")
self.assertQuerysetEqual(
qs,
[
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True))
.order_by("pk")
)
self.assertQuerysetEqual(
qs,
[
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None))
.order_by("num_awards")
)
self.assertQuerysetEqual(
qs,
[
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann",
],
attrgetter("name"),
)
def test_quoting_aggregate_order_by(self):
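        # The mixed-case alias "authorCount" must be quoted when referenced
        # in ORDER BY on case-sensitive backends.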
qs = (
Book.objects.filter(name="Python Web Development with Django")
.annotate(authorCount=Count("authors"))
.order_by("authorCount")
)
self.assertQuerysetEqual(
qs,
[
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount),
)
def test_stddev(self):
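        # Population (the default) and sample (sample=True) standard
        # deviation and variance differ, hence the two sets of expected
        # values.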
self.assertEqual(
Book.objects.aggregate(StdDev("pages")),
{"pages__stddev": Approximate(311.46, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating")),
{"rating__stddev": Approximate(0.60, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price")),
{"price__stddev": Approximate(Decimal("24.16"), 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("pages", sample=True)),
{"pages__stddev": Approximate(341.19, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating", sample=True)),
{"rating__stddev": Approximate(0.66, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price", sample=True)),
{"price__stddev": Approximate(Decimal("26.46"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages")),
{"pages__variance": Approximate(97010.80, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating")),
{"rating__variance": Approximate(0.36, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price")),
{"price__variance": Approximate(Decimal("583.77"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages", sample=True)),
{"pages__variance": Approximate(116412.96, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating", sample=True)),
{"rating__variance": Approximate(0.44, 2)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price", sample=True)),
{"price__variance": Approximate(Decimal("700.53"), 2)},
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name poses no problem in this
        # case.
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.filter(book_cnt=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Neither in this case
qs = (
Author.objects.annotate(book_count=Count("book"))
.filter(book_count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = (
Author.objects.annotate(Count("book"))
.filter(book__count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count("book")).aggregate(Max("book__count")),
{"book__count__max": 2},
)
def test_annotate_joins(self):
"""
The base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() on an
        FK field in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count("pk"))
self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None)
# The query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count("book_contact_set"))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn("id", group_by[0][0])
self.assertNotIn("name", group_by[0][0])
self.assertNotIn("age", group_by[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only("id", "name").annotate(
num_contacts=Count("book_contact_set")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("age", grouping[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id
# because of the select_related.
self.assertEqual(
len(grouping), 1 if connection.features.allows_group_by_pk else 2
)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("contact", grouping[0][0])
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_columns(self):
"""
Unmanaged models are sometimes used to represent database views which
may not allow grouping by selected primary key.
"""
def assertQuerysetResults(queryset):
self.assertEqual(
[(b.name, b.num_authors) for b in queryset.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
queryset = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Unmanaged origin model.
with mock.patch.object(Book._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
for index, field in enumerate(Book._meta.fields):
self.assertIn(field.name, grouping[index][0])
self.assertIn(Author._meta.pk.name, grouping[-1][0])
assertQuerysetResults(queryset)
# Unmanaged related model.
with mock.patch.object(Author._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
self.assertIn(Book._meta.pk.name, grouping[0][0])
for index, field in enumerate(Author._meta.fields):
self.assertIn(field.name, grouping[index + 1][0])
assertQuerysetResults(queryset)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_as_tables(self):
qs = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Force treating unmanaged models as tables.
with mock.patch(
"django.db.connection.features.allows_group_by_selected_pks_on_model",
return_value=True,
):
with mock.patch.object(Book._meta, "managed", False), mock.patch.object(
Author._meta, "managed", False
):
_, _, grouping = qs.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 2)
self.assertIn("id", grouping[0][0])
self.assertIn("id", grouping[1][0])
self.assertQuerysetEqual(
qs.order_by("name"),
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
(
"The Definitive Guide to Django: Web Development Done "
"Right",
2,
),
],
attrgetter("name", "num_authors"),
)
def test_reverse_join_trimming(self):
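        # Although book_contact_set__contact points back to Author, the join
        # through Book can't be trimmed without changing the count.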
qs = Author.objects.annotate(Count("book_contact_set__contact"))
self.assertIn(" JOIN ", str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
        Regression test for #10870: aggregates with joins ignore extra
        filters provided by setup_joins. Tests aggregation with generic
        reverse relations.
"""
django_book = Book.objects.get(name="Practical Django Projects")
ItemTag.objects.create(
object_id=django_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(django_book),
)
ItemTag.objects.create(
object_id=django_book.id,
tag="django",
content_type=ContentType.objects.get_for_model(django_book),
)
        # Assign a tag to a model with the same PK as the book above. If the
        # JOIN used in the aggregation doesn't have the content type as part
        # of its condition, the annotation will also count the 'hi mom' tag
        # for django_book.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(
object_id=wmpk.id,
tag="hi mom",
content_type=ContentType.objects.get_for_model(wmpk),
)
ai_book = Book.objects.get(
name__startswith="Paradigms of Artificial Intelligence"
)
ItemTag.objects.create(
object_id=ai_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(ai_book),
)
self.assertEqual(Book.objects.aggregate(Count("tags")), {"tags__count": 3})
results = Book.objects.annotate(Count("tags")).order_by("-tags__count", "name")
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
("Practical Django Projects", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Artificial Intelligence: A Modern Approach", 0),
("Python Web Development with Django", 0),
("Sams Teach Yourself Django in 24 Hours", 0),
("The Definitive Guide to Django: Web Development Done Right", 0),
],
)
def test_negated_aggregation(self):
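        # Excluding on the aggregate must give the same result as excluding
        # on a pk__in subquery built from the same condition.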
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2), Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2) | Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
def test_name_filters(self):
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty"))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_name_expressions(self):
# Aggregates are spotted correctly from F objects.
        # Note that Adrian's age is 34 in the fixtures, and he has one book,
        # so each condition matches exactly one author.
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_filter_aggregates_or_connector(self):
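        # A condition on a plain field OR-ed with a condition on an aggregate
        # cannot be split between WHERE and HAVING and must be evaluated
        # together.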
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 | q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_and_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 & q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 ^ q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 ^ q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b2.pk, self.b3.pk, self.b5.pk],
attrgetter("pk"),
)
def test_ticket_11293_q_immutable(self):
"""
        Splitting a Q object into parts for WHERE/HAVING doesn't alter
        the original Q object.
"""
q1 = Q(isbn="")
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors"))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
        An F() object referring to a related column works correctly in
        GROUP BY.
"""
qs = Book.objects.annotate(account=Count("authors")).filter(
account=F("publisher__num_awards")
)
self.assertQuerysetEqual(
qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name
)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count("authors__id")).aggregate(
Sum("select"), Avg("select")
)
self.assertEqual(
vals,
{
"select__sum": 10,
"select__avg": Approximate(1.666, places=2),
},
)
def test_annotate_on_relation(self):
book = Book.objects.annotate(
avg_price=Avg("price"), publisher_name=F("publisher__name")
).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg("price")).aggregate(
publisher_awards=Sum("publisher__num_awards")
)
self.assertEqual(qs["publisher_awards"], 30)
def test_annotate_distinct_aggregate(self):
        # There are three books with a rating of 4.0 and two of them have
        # the same price. Hence, distinct() removes one rating of 4.0
        # from the results.
vals1 = (
Book.objects.values("rating", "price")
.distinct()
.aggregate(result=Sum("rating"))
)
vals2 = Book.objects.aggregate(result=Sum("rating") - Value(4.0))
self.assertEqual(vals1, vals2)
def test_annotate_values_list_flat(self):
"""Find ages that are shared by at least two authors."""
qs = (
Author.objects.values_list("age", flat=True)
.annotate(age_count=Count("age"))
.filter(age_count__gt=1)
)
self.assertSequenceEqual(qs, [29])
def test_allow_distinct(self):
class MyAggregate(Aggregate):
pass
with self.assertRaisesMessage(TypeError, "MyAggregate does not allow distinct"):
MyAggregate("foo", distinct=True)
class DistinctAggregate(Aggregate):
allow_distinct = True
DistinctAggregate("foo", distinct=True)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_having_subquery_select(self):
authors = Author.objects.filter(pk=self.a1.pk)
books = Book.objects.annotate(Count("authors")).filter(
Q(authors__in=authors) | Q(authors__count__gt=2)
)
self.assertEqual(set(books), {self.b1, self.b4})
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
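        # Charlie.alfa is a nullable FK; annotating across bravo must not
        # break select_related("alfa") whether alfa is NULL or set.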
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related("alfa").annotate(Count("bravo__charlie"))
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(
Count("alfa__name")
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # Also, an already promoted join is demoted back to INNER when
        # filtering on it.
qs = Charlie.objects.annotate(Count("alfa__name")).filter(
alfa__name__isnull=False
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # But, as the join is nullable, its first use by annotate() will be
        # LOUTER.
qs = Charlie.objects.annotate(Count("alfa__name"))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
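        # Book.contact is a non-nullable FK, so its join can stay INNER even
        # when it's only used for aggregation.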
qs = Book.objects.annotate(Count("contact__name"))
self.assertIn(" INNER JOIN ", str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
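        # Counting children over a self-referential FK must keep the parent
        # and child table aliases distinct.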
t1 = SelfRefFK.objects.create(name="t1")
SelfRefFK.objects.create(name="t2", parent=t1)
SelfRefFK.objects.create(name="t3", parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count("children")).order_by("name"),
[("t1", 2), ("t2", 0), ("t3", 0)],
lambda x: (x.name, x.num_children),
)
|
306cec4ce1b43e9f83c4bfc4fc15c6491386cde80f8d085178e2e2dbd8c9389b | import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField,
Case,
CharField,
Count,
DateTimeField,
DecimalField,
Exists,
ExpressionWrapper,
F,
FloatField,
Func,
IntegerField,
Max,
OuterRef,
Q,
Subquery,
Sum,
Value,
When,
)
from django.db.models.expressions import RawSQL
from django.db.models.functions import (
    Coalesce,
    ExtractYear,
    Floor,
    Length,
    Lower,
    Trim,
)
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import register_lookup
from .models import (
Author,
Book,
Company,
DepartmentStore,
Employee,
Publisher,
Store,
Ticket,
)
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.p2 = Publisher.objects.create(name="Sams", num_awards=1)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
cls.s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
cls.s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
cls.s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
cls.s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
cls.s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
cls.s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(is_book=Value(1))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F("rating"))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related("publisher").annotate(
num_awards=F("publisher__num_awards")
)
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_joined_transformed_annotation(self):
Employee.objects.bulk_create(
[
Employee(
first_name="John",
last_name="Doe",
age=18,
store=self.s1,
salary=15000,
),
Employee(
first_name="Jane",
last_name="Jones",
age=30,
store=self.s2,
salary=30000,
),
Employee(
first_name="Jo",
last_name="Smith",
age=55,
store=self.s3,
salary=50000,
),
]
)
employees = Employee.objects.annotate(
store_opened_year=F("store__original_opening__year"),
)
for employee in employees:
self.assertEqual(
employee.store_opened_year,
employee.store.original_opening.year,
)
def test_custom_transform_annotation(self):
with register_lookup(DecimalField, Floor):
books = Book.objects.annotate(floor_price=F("price__floor"))
self.assertCountEqual(
books.values_list("pk", "floor_price"),
[
(self.b1.pk, 30),
(self.b2.pk, 23),
(self.b3.pk, 29),
(self.b4.pk, 29),
(self.b5.pk, 82),
(self.b6.pk, 75),
],
)
def test_chaining_transforms(self):
Company.objects.create(name=" Django Software Foundation ")
Company.objects.create(name="Yahoo")
with register_lookup(CharField, Trim), register_lookup(CharField, Length):
for expr in [Length("name__trim"), F("name__trim__length")]:
with self.subTest(expr=expr):
self.assertCountEqual(
Company.objects.annotate(length=expr).values("name", "length"),
[
{"name": " Django Software Foundation ", "length": 26},
{"name": "Yahoo", "length": 5},
],
)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(
F("active_at") + F("duration"), output_field=DateTimeField()
)
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(
F("pages") + F("rating"), output_field=IntegerField()
)
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_empty_expression_annotation(self):
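        # An always-false Q (empty pk__in) still annotates every row with
        # False instead of filtering rows out.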
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
books = Book.objects.annotate(
selected=ExpressionWrapper(
Q(pk__in=Book.objects.none()), output_field=BooleanField()
)
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
def test_full_expression_annotation(self):
books = Book.objects.annotate(
selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()),
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(book.selected for book in books))
def test_full_expression_annotation_with_aggregation(self):
qs = Book.objects.filter(isbn="159059725").annotate(
selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()),
rating_count=Count("rating"),
)
self.assertEqual([book.rating_count for book in qs], [1])
def test_aggregate_over_full_expression_annotation(self):
qs = Book.objects.annotate(
selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()),
).aggregate(Sum("selected"))
self.assertEqual(qs["selected__sum"], Book.objects.count())
def test_empty_queryset_annotation(self):
qs = Author.objects.annotate(empty=Subquery(Author.objects.values("id").none()))
self.assertIsNone(qs.first().empty)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(is_book=Value(1), rating_count=Count("rating"))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_combined_expression_annotation_with_aggregation(self):
book = Book.objects.annotate(
combined=ExpressionWrapper(
Value(3) * Value(4), output_field=IntegerField()
),
rating_count=Count("rating"),
).first()
self.assertEqual(book.combined, 12)
self.assertEqual(book.rating_count, 1)
def test_combined_f_expression_annotation_with_aggregation(self):
book = (
Book.objects.filter(isbn="159059725")
.annotate(
combined=ExpressionWrapper(
F("price") * F("pages"), output_field=FloatField()
),
rating_count=Count("rating"),
)
.first()
)
self.assertEqual(book.combined, 13410.0)
self.assertEqual(book.rating_count, 1)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_q_expression_annotation_with_aggregation(self):
book = (
Book.objects.filter(isbn="159059725")
.annotate(
isnull_pubdate=ExpressionWrapper(
Q(pubdate__isnull=True),
output_field=BooleanField(),
),
rating_count=Count("rating"),
)
.first()
)
self.assertIs(book.isnull_pubdate, False)
self.assertEqual(book.rating_count, 1)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_grouping_by_q_expression_annotation(self):
authors = (
Author.objects.annotate(
under_40=ExpressionWrapper(Q(age__lt=40), output_field=BooleanField()),
)
.values("under_40")
.annotate(
count_id=Count("id"),
)
.values("under_40", "count_id")
)
self.assertCountEqual(
authors,
[
{"under_40": False, "count_id": 3},
{"under_40": True, "count_id": 6},
],
)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F("age")).aggregate(
otherage_sum=Sum("other_age")
)
other_agg = Author.objects.aggregate(age_sum=Sum("age"))
self.assertEqual(agg["otherage_sum"], other_agg["age_sum"])
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name="test store",
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
"Theodore Roosevelt",
"Eleanor Roosevelt",
"Franklin Roosevelt",
"Ned Stark",
"Catelyn Stark",
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30,
salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower("last_name"),
).distinct("name_lower")
self.assertEqual({p.last_name for p in people}, {"Stark", "Roosevelt"})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F("store__name"),
).distinct("test_alias")
self.assertEqual(len(people2), 1)
lengths = (
Employee.objects.annotate(
name_len=Length("first_name"),
)
.distinct("name_len")
.values_list("name_len", flat=True)
)
self.assertCountEqual(lengths, [3, 7, 8])
def test_filter_annotation(self):
books = Book.objects.annotate(is_book=Value(1)).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(other_rating=F("rating")).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(other_rating=F("rating")).filter(
other_rating=F("rating")
)
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(sum_rating=Sum("rating")).filter(
sum_rating=F("sum_rating")
)
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(
Book.objects.annotate(sum_rating=Sum("rating")).filter(
sum_rating=F("nope")
)
)
def test_decimal_annotation(self):
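        # salary is one unit in the field's last decimal place, the smallest
        # non-zero value the column can represent.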
salary = Decimal(10) ** -Employee._meta.get_field("salary").decimal_places
Employee.objects.create(
first_name="Max",
last_name="Paine",
store=Store.objects.first(),
age=23,
salary=salary,
)
self.assertEqual(
Employee.objects.annotate(new_salary=F("salary") / 10).get().new_salary,
salary / 10,
)
def test_filter_decimal_annotation(self):
qs = (
Book.objects.annotate(new_price=F("price") + 1)
.filter(new_price=Decimal(31))
.values_list("new_price")
)
self.assertEqual(qs.get(), (Decimal(31),))
def test_combined_annotation_commutative(self):
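        # F() arithmetic must be commutative, and combining with None must
        # yield NULL regardless of operand order.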
book1 = Book.objects.annotate(adjusted_rating=F("rating") + 2).get(
pk=self.b1.pk
)
book2 = Book.objects.annotate(adjusted_rating=2 + F("rating")).get(
pk=self.b1.pk
)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F("rating") + None).get(
pk=self.b1.pk
)
book2 = Book.objects.annotate(adjusted_rating=None + F("rating")).get(
pk=self.b1.pk
)
self.assertIs(book1.adjusted_rating, None)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
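        # update() can reference an annotation computed from the field being
        # updated.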
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F("rating") - 1).update(
rating=F("other_rating")
)
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = (
Book.objects.annotate(author_age=F("authors__age"))
.filter(pk=self.b1.pk)
.order_by("author_age")
)
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = (
Book.objects.annotate(
store_name=F("store__name"),
)
.filter(
name="Practical Django Projects",
)
.order_by("store_name")
)
self.assertQuerysetEqual(
books,
["Amazon.com", "Books.com", "Mamma and Pappa's Books"],
lambda b: b.store_name,
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values("rating").annotate(other_rating=F("rating") - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book["rating"] - 1, book["other_rating"])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book["other_rating"], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F("isbn")).get(other_rating=4)
self.assertEqual(book["other_rating"], 4)
self.assertEqual(book["other_isbn"], "155860191")
def test_values_with_pk_annotation(self):
# annotate references a field in values() with pk
publishers = Publisher.objects.values("id", "book__rating").annotate(
total=Sum("book__rating")
)
for publisher in publishers.filter(pk=self.p1.pk):
self.assertEqual(publisher["book__rating"], publisher["total"])
@skipUnlessDBFeature("allows_group_by_pk")
def test_rawsql_group_by_collapse(self):
raw = RawSQL("SELECT MIN(id) FROM annotations_book", [])
qs = (
Author.objects.values("id")
.annotate(
min_book_id=raw,
count_friends=Count("friends"),
)
.order_by()
)
_, _, group_by = qs.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertNotEqual(raw, group_by[0])
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer("rating").annotate(other_rating=F("rating") - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with self.assertRaisesMessage(
FieldDoesNotExist, "Book has no field named 'other_rating'"
):
book = qs.defer("other_rating").get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name="Angus & Robinson",
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain="Westfield",
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = (
DepartmentStore.objects.annotate(
other_name=F("name"),
other_chain=F("chain"),
is_open=Value(True, BooleanField()),
book_isbn=F("books__isbn"),
)
.order_by("book_isbn")
.filter(chain="Westfield")
)
self.assertQuerysetEqual(
qs,
[
("Angus & Robinson", "Westfield", True, "155860191"),
("Angus & Robinson", "Westfield", True, "159059725"),
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn),
)
def test_null_annotation(self):
"""
Annotating None onto a model round-trips
"""
book = Book.objects.annotate(
no_value=Value(None, output_field=IntegerField())
).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F("age")).order_by("other_age")
self.assertQuerysetEqual(
authors,
[
25,
29,
29,
34,
35,
37,
45,
46,
57,
],
lambda a: a.other_age,
)
def test_order_by_aggregate(self):
authors = (
Author.objects.values("age")
.annotate(age_count=Count("age"))
.order_by("age_count", "age")
)
self.assertQuerysetEqual(
authors,
[
(25, 1),
(34, 1),
(35, 1),
(37, 1),
(45, 1),
(46, 1),
(57, 1),
(29, 2),
],
lambda a: (a["age"], a["age_count"]),
)
def test_raw_sql_with_inherited_field(self):
DepartmentStore.objects.create(
name="Angus & Robinson",
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21),
chain="Westfield",
area=123,
)
tests = (
("name", "Angus & Robinson"),
("surface", 123),
("case when name='Angus & Robinson' then chain else name end", "Westfield"),
)
for sql, expected_result in tests:
with self.subTest(sql=sql):
self.assertSequenceEqual(
DepartmentStore.objects.annotate(
annotation=RawSQL(sql, ()),
).values_list("annotation", flat=True),
[expected_result],
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count("id")).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Columns are aligned in the correct order for resolve_columns. This test
will fail on MySQL if column ordering is out. Column fields should be
aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(
id=1,
first_name="Max",
manager=True,
last_name="Paine",
store=store,
age=23,
salary=Decimal(50000.00),
)
Employee.objects.create(
id=2,
first_name="Buffy",
manager=False,
last_name="Summers",
store=store,
age=18,
salary=Decimal(40000.00),
)
qs = (
Employee.objects.extra(select={"random_value": "42"})
.select_related("store")
.annotate(
annotated_value=Value(17),
)
)
rows = [
(1, "Max", True, 42, "Paine", 23, Decimal(50000.00), store.name, 17),
(2, "Buffy", False, 42, "Summers", 18, Decimal(40000.00), store.name, 17),
]
self.assertQuerysetEqual(
qs.order_by("id"),
rows,
lambda e: (
e.id,
e.first_name,
e.manager,
e.random_value,
e.last_name,
e.age,
e.salary,
e.store.name,
e.annotated_value,
),
)
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(
id=1,
first_name="Max",
manager=True,
last_name="Paine",
store=store,
age=23,
salary=Decimal(50000.00),
)
Employee.objects.create(
id=2,
first_name="Buffy",
manager=False,
last_name="Summers",
store=store,
age=18,
salary=Decimal(40000.00),
)
qs = (
Employee.objects.extra(select={"random_value": "42"})
.select_related("store")
.annotate(
annotated_value=Value(17),
)
)
rows = [
(1, "Max", True, 42, "Paine", 23, Decimal(50000.00), store.name, 17),
(2, "Buffy", False, 42, "Summers", 18, Decimal(40000.00), store.name, 17),
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer("age").order_by("id"),
rows,
lambda e: (
e.id,
e.first_name,
e.manager,
e.random_value,
e.last_name,
e.age,
e.salary,
e.store.name,
e.annotated_value,
),
)
def test_custom_functions(self):
Company(
name="Apple",
motto=None,
ticker_name="APPL",
description="Beautiful Devices",
).save()
Company(
name="Django Software Foundation",
motto=None,
ticker_name=None,
description=None,
).save()
Company(
name="Google",
motto="Do No Evil",
ticker_name="GOOG",
description="Internet Company",
).save()
Company(
name="Yahoo", motto=None, ticker_name=None, description="Internet Company"
).save()
qs = Company.objects.annotate(
tagline=Func(
F("motto"),
F("ticker_name"),
F("description"),
Value("No Tag"),
function="COALESCE",
)
).order_by("name")
self.assertQuerysetEqual(
qs,
[
("Apple", "APPL"),
("Django Software Foundation", "No Tag"),
("Google", "Do No Evil"),
("Yahoo", "Internet Company"),
],
lambda c: (c.name, c.tagline),
)
def test_custom_functions_can_ref_other_functions(self):
Company(
name="Apple",
motto=None,
ticker_name="APPL",
description="Beautiful Devices",
).save()
Company(
name="Django Software Foundation",
motto=None,
ticker_name=None,
description=None,
).save()
Company(
name="Google",
motto="Do No Evil",
ticker_name="GOOG",
description="Internet Company",
).save()
Company(
name="Yahoo", motto=None, ticker_name=None, description="Internet Company"
).save()
class Lower(Func):
function = "LOWER"
qs = (
Company.objects.annotate(
tagline=Func(
F("motto"),
F("ticker_name"),
F("description"),
Value("No Tag"),
function="COALESCE",
)
)
.annotate(
tagline_lower=Lower(F("tagline")),
)
.order_by("name")
)
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs,
[
("Apple", "APPL".lower()),
("Django Software Foundation", "No Tag".lower()),
("Google", "Do No Evil".lower()),
("Yahoo", "Internet Company".lower()),
],
lambda c: (c.name, c.tagline_lower),
)
def test_boolean_value_annotation(self):
books = Book.objects.annotate(
is_book=Value(True, output_field=BooleanField()),
is_pony=Value(False, output_field=BooleanField()),
is_none=Value(None, output_field=BooleanField(null=True)),
)
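# Guard so the loop below is known to check at least one row.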
self.assertGreater(len(books), 0)
for book in books:
self.assertIs(book.is_book, True)
self.assertIs(book.is_pony, False)
self.assertIsNone(book.is_none)
def test_annotation_in_f_grouped_by_annotation(self):
qs = (
Publisher.objects.annotate(multiplier=Value(3))
# group by option => sum of value * multiplier
.values("name")
.annotate(multiplied_value_sum=Sum(F("multiplier") * F("num_awards")))
.order_by()
)
self.assertCountEqual(
qs,
[
{"multiplied_value_sum": 9, "name": "Apress"},
{"multiplied_value_sum": 0, "name": "Jonno's House of Books"},
{"multiplied_value_sum": 27, "name": "Morgan Kaufmann"},
{"multiplied_value_sum": 21, "name": "Prentice Hall"},
{"multiplied_value_sum": 3, "name": "Sams"},
],
)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.annotate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % BooleanField()):
Book.objects.annotate(BooleanField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.annotate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(BooleanField()), "True"])
):
Book.objects.annotate(BooleanField(), Value(False), is_book=True)
def test_chaining_annotation_filter_with_m2m(self):
qs = (
Author.objects.filter(
name="Adrian Holovaty",
friends__age=35,
)
.annotate(
jacob_name=F("friends__name"),
)
.filter(
friends__age=29,
)
.annotate(
james_name=F("friends__name"),
)
.values("jacob_name", "james_name")
)
self.assertCountEqual(
qs,
[{"jacob_name": "Jacob Kaplan-Moss", "james_name": "James Bennett"}],
)
def test_annotation_filter_with_subquery(self):
long_books_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
publisher_books_qs = (
Publisher.objects.annotate(
total_books=Count("book"),
)
.filter(
total_books=Subquery(long_books_qs, output_field=IntegerField()),
)
.values("name")
)
self.assertCountEqual(
publisher_books_qs, [{"name": "Sams"}, {"name": "Morgan Kaufmann"}]
)
def test_annotation_and_alias_filter_in_subquery(self):
awarded_publishers_qs = (
Publisher.objects.filter(num_awards__gt=4)
.annotate(publisher_annotate=Value(1))
.alias(publisher_alias=Value(1))
)
qs = Publisher.objects.filter(pk__in=awarded_publishers_qs)
self.assertCountEqual(qs, [self.p3, self.p4])
def test_annotation_and_alias_filter_related_in_subquery(self):
long_books_qs = (
Book.objects.filter(pages__gt=400)
.annotate(book_annotate=Value(1))
.alias(book_alias=Value(1))
)
publisher_books_qs = Publisher.objects.filter(
book__in=long_books_qs,
).values("name")
self.assertCountEqual(
publisher_books_qs,
[
{"name": "Apress"},
{"name": "Sams"},
{"name": "Prentice Hall"},
{"name": "Morgan Kaufmann"},
],
)
def test_annotation_exists_aggregate_values_chaining(self):
qs = (
Book.objects.values("publisher")
.annotate(
has_authors=Exists(
Book.authors.through.objects.filter(book=OuterRef("pk"))
),
max_pubdate=Max("pubdate"),
)
.values_list("max_pubdate", flat=True)
.order_by("max_pubdate")
)
self.assertCountEqual(
qs,
[
datetime.date(1991, 10, 15),
datetime.date(2008, 3, 3),
datetime.date(2008, 6, 23),
datetime.date(2008, 11, 3),
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_annotation_subquery_and_aggregate_values_chaining(self):
qs = (
Book.objects.annotate(pub_year=ExtractYear("pubdate"))
.values("pub_year")
.annotate(
top_rating=Subquery(
Book.objects.filter(pubdate__year=OuterRef("pub_year"))
.order_by("-rating")
.values("rating")[:1]
),
total_pages=Sum("pages"),
)
.values("pub_year", "total_pages", "top_rating")
)
self.assertCountEqual(
qs,
[
{"pub_year": 1991, "top_rating": 5.0, "total_pages": 946},
{"pub_year": 1995, "top_rating": 4.0, "total_pages": 1132},
{"pub_year": 2007, "top_rating": 4.5, "total_pages": 447},
{"pub_year": 2008, "top_rating": 4.0, "total_pages": 1178},
],
)
def test_annotation_subquery_outerref_transform(self):
qs = Book.objects.annotate(
top_rating_year=Subquery(
Book.objects.filter(pubdate__year=OuterRef("pubdate__year"))
.order_by("-rating")
.values("rating")[:1]
),
).values("pubdate__year", "top_rating_year")
self.assertCountEqual(
qs,
[
{"pubdate__year": 1991, "top_rating_year": 5.0},
{"pubdate__year": 1995, "top_rating_year": 4.0},
{"pubdate__year": 2007, "top_rating_year": 4.5},
{"pubdate__year": 2008, "top_rating_year": 4.0},
{"pubdate__year": 2008, "top_rating_year": 4.0},
{"pubdate__year": 2008, "top_rating_year": 4.0},
],
)
def test_annotation_aggregate_with_m2o(self):
qs = (
Author.objects.filter(age__lt=30)
.annotate(
max_pages=Case(
When(book_contact_set__isnull=True, then=Value(0)),
default=Max(F("book__pages")),
),
)
.values("name", "max_pages")
)
self.assertCountEqual(
qs,
[
{"name": "James Bennett", "max_pages": 300},
{"name": "Paul Bissex", "max_pages": 0},
{"name": "Wesley J. Chun", "max_pages": 0},
],
)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "annotations_book"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(**{crafted_alias: Value(1)})
def test_alias_forbidden_chars(self):
tests = [
'al"ias',
"a'lias",
"ali`as",
"alia s",
"alias\t",
"ali\nas",
"alias--",
"ali/*as",
"alias*/",
"alias;",
# [] are used by MSSQL.
"alias[",
"alias]",
]
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
for crafted_alias in tests:
with self.subTest(crafted_alias):
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(**{crafted_alias: Value(1)})
class AliasTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="James Bennett", age=34)
cls.a4 = Author.objects.create(name="Peter Norvig", age=57)
cls.a5 = Author.objects.create(name="Stuart Russell", age=46)
p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.b1 = Book.objects.create(
isbn="159059725",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=p1,
pubdate=datetime.date(2007, 12, 6),
name="The Definitive Guide to Django: Web Development Done Right",
)
cls.b2 = Book.objects.create(
isbn="159059996",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a3,
publisher=p1,
pubdate=datetime.date(2008, 6, 23),
name="Practical Django Projects",
)
cls.b3 = Book.objects.create(
isbn="013790395",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a4,
publisher=p1,
pubdate=datetime.date(1995, 1, 15),
name="Artificial Intelligence: A Modern Approach",
)
cls.b4 = Book.objects.create(
isbn="155860191",
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a4,
publisher=p1,
pubdate=datetime.date(1991, 10, 15),
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4, cls.a5)
cls.b4.authors.add(cls.a4)
Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
def test_basic_alias(self):
qs = Book.objects.alias(is_book=Value(1))
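# alias() registers the expression for reuse in later clauses without
# selecting it, so it never becomes an attribute on the instances.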
self.assertIs(hasattr(qs.first(), "is_book"), False)
def test_basic_alias_annotation(self):
qs = Book.objects.alias(
is_book_alias=Value(1),
).annotate(is_book=F("is_book_alias"))
self.assertIs(hasattr(qs.first(), "is_book_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.is_book, 1)
def test_basic_alias_f_annotation(self):
qs = Book.objects.alias(another_rating_alias=F("rating")).annotate(
another_rating=F("another_rating_alias")
)
self.assertIs(hasattr(qs.first(), "another_rating_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.another_rating, book.rating)
def test_basic_alias_f_transform_annotation(self):
qs = Book.objects.alias(
pubdate_alias=F("pubdate"),
).annotate(pubdate_year=F("pubdate_alias__year"))
self.assertIs(hasattr(qs.first(), "pubdate_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.pubdate_year, book.pubdate.year)
def test_alias_after_annotation(self):
qs = Book.objects.annotate(
is_book=Value(1),
).alias(is_book_alias=F("is_book"))
book = qs.first()
self.assertIs(hasattr(book, "is_book"), True)
self.assertIs(hasattr(book, "is_book_alias"), False)
def test_overwrite_annotation_with_alias(self):
qs = Book.objects.annotate(is_book=Value(1)).alias(is_book=F("is_book"))
self.assertIs(hasattr(qs.first(), "is_book"), False)
def test_overwrite_alias_with_annotation(self):
qs = Book.objects.alias(is_book=Value(1)).annotate(is_book=F("is_book"))
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.is_book, 1)
def test_alias_annotation_expression(self):
qs = Book.objects.alias(
is_book_alias=Value(1),
).annotate(is_book=Coalesce("is_book_alias", 0))
self.assertIs(hasattr(qs.first(), "is_book_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.is_book, 1)
def test_alias_default_alias_expression(self):
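# A positional expression gets its default alias, here "book__pages__sum",
# which can be filtered on but is still not selected.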
qs = Author.objects.alias(
Sum("book__pages"),
).filter(book__pages__sum__gt=2000)
self.assertIs(hasattr(qs.first(), "book__pages__sum"), False)
self.assertSequenceEqual(qs, [self.a4])
def test_joined_alias_annotation(self):
qs = (
Book.objects.select_related("publisher")
.alias(
num_awards_alias=F("publisher__num_awards"),
)
.annotate(num_awards=F("num_awards_alias"))
)
self.assertIs(hasattr(qs.first(), "num_awards_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_alias_annotate_with_aggregation(self):
qs = Book.objects.alias(
is_book_alias=Value(1),
rating_count_alias=Count("rating"),
).annotate(
is_book=F("is_book_alias"),
rating_count=F("rating_count_alias"),
)
book = qs.first()
self.assertIs(hasattr(book, "is_book_alias"), False)
self.assertIs(hasattr(book, "rating_count_alias"), False)
for book in qs:
with self.subTest(book=book):
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_filter_alias_with_f(self):
qs = Book.objects.alias(
other_rating=F("rating"),
).filter(other_rating=4.5)
self.assertIs(hasattr(qs.first(), "other_rating"), False)
self.assertSequenceEqual(qs, [self.b1])
def test_filter_alias_with_double_f(self):
qs = Book.objects.alias(
other_rating=F("rating"),
).filter(other_rating=F("rating"))
self.assertIs(hasattr(qs.first(), "other_rating"), False)
self.assertEqual(qs.count(), Book.objects.count())
def test_filter_alias_agg_with_double_f(self):
qs = Book.objects.alias(
sum_rating=Sum("rating"),
).filter(sum_rating=F("sum_rating"))
self.assertIs(hasattr(qs.first(), "sum_rating"), False)
self.assertEqual(qs.count(), Book.objects.count())
def test_update_with_alias(self):
Book.objects.alias(
other_rating=F("rating") - 1,
).update(rating=F("other_rating"))
self.b1.refresh_from_db()
self.assertEqual(self.b1.rating, 3.5)
def test_order_by_alias(self):
qs = Author.objects.alias(other_age=F("age")).order_by("other_age")
self.assertIs(hasattr(qs.first(), "other_age"), False)
self.assertQuerysetEqual(qs, [34, 34, 35, 46, 57], lambda a: a.age)
def test_order_by_alias_aggregate(self):
qs = (
Author.objects.values("age")
.alias(age_count=Count("age"))
.order_by("age_count", "age")
)
self.assertIs(hasattr(qs.first(), "age_count"), False)
self.assertQuerysetEqual(qs, [35, 46, 57, 34], lambda a: a["age"])
def test_dates_alias(self):
qs = Book.objects.alias(
pubdate_alias=F("pubdate"),
).dates("pubdate_alias", "month")
self.assertCountEqual(
qs,
[
datetime.date(1991, 10, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 12, 1),
datetime.date(2008, 6, 1),
],
)
def test_datetimes_alias(self):
qs = Store.objects.alias(
original_opening_alias=F("original_opening"),
).datetimes("original_opening_alias", "year")
self.assertCountEqual(
qs,
[
datetime.datetime(1994, 1, 1),
datetime.datetime(2001, 1, 1),
],
)
def test_aggregate_alias(self):
msg = (
"Cannot aggregate over the 'other_age' alias. Use annotate() to promote it."
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.alias(
other_age=F("age"),
).aggregate(otherage_sum=Sum("other_age"))
def test_defer_only_alias(self):
qs = Book.objects.alias(rating_alias=F("rating") - 1)
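# defer()/only() accept only concrete field names, so an alias name is
# rejected as an unknown field.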
msg = "Book has no field named 'rating_alias'"
for operation in ["defer", "only"]:
with self.subTest(operation=operation):
with self.assertRaisesMessage(FieldDoesNotExist, msg):
getattr(qs, operation)("rating_alias").first()
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_on_alias(self):
qs = Book.objects.alias(rating_alias=F("rating") - 1)
msg = "Cannot resolve keyword 'rating_alias' into field."
with self.assertRaisesMessage(FieldError, msg):
qs.distinct("rating_alias").first()
def test_values_alias(self):
qs = Book.objects.alias(rating_alias=F("rating") - 1)
msg = "Cannot select the 'rating_alias' alias. Use annotate() to promote it."
for operation in ["values", "values_list"]:
with self.subTest(operation=operation):
with self.assertRaisesMessage(FieldError, msg):
getattr(qs, operation)("rating_alias")
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "annotations_book"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.alias(**{crafted_alias: Value(1)})
|
248bf334b4809bb94d56ba19cbb133e4b50f1c4bd5c255c5fbe8fc85acc9a777 | import time
import traceback
from datetime import date, datetime, timedelta
from threading import Thread
from django.core.exceptions import FieldError
from django.db import DatabaseError, IntegrityError, connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from django.utils.functional import lazy
from .models import (
Author,
Book,
DefaultPerson,
Journalist,
ManualPrimaryKeyTest,
Person,
Profile,
Publisher,
Tag,
Thing,
)
class GetOrCreateTests(TestCase):
@classmethod
def setUpTestData(cls):
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
def test_get_or_create_method_with_get(self):
created = Person.objects.get_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": date(1940, 10, 9)},
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 1)
def test_get_or_create_method_with_create(self):
created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)[1]
self.assertTrue(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_redundant_instance(self):
"""
If we execute the exact same statement twice, the second call
won't create a Person.
"""
Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)
created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_invalid_params(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
with self.assertRaises(IntegrityError):
Person.objects.get_or_create(first_name="Tom", last_name="Smith")
def test_get_or_create_with_pk_property(self):
"""
Using the pk property of a model is allowed.
"""
Thing.objects.get_or_create(pk=1)
def test_get_or_create_with_model_property_defaults(self):
"""Using a property with a setter implemented is allowed."""
t, _ = Thing.objects.get_or_create(
defaults={"capitalized_name_property": "annie"}, pk=1
)
self.assertEqual(t.name, "Annie")
def test_get_or_create_on_related_manager(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(
name="The Great Book of Ed", publisher_id=p.id
)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(
name="The Great Book of Ed", publisher_id=p.id
)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "testing",
},
)
self.assertTrue(created)
self.assertEqual(obj.defaults, "testing")
obj2, created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "testing",
},
)
self.assertFalse(created)
self.assertEqual(obj, obj2)
def test_callable_defaults(self):
"""
Callables in `defaults` are evaluated if the instance is created.
"""
obj, created = Person.objects.get_or_create(
first_name="George",
defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)},
)
self.assertTrue(created)
self.assertEqual(date(1943, 2, 25), obj.birthday)
def test_callable_defaults_not_called(self):
def raise_exception():
raise AssertionError
obj, created = Person.objects.get_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": lambda: raise_exception()},
)
def test_defaults_not_evaluated_unless_needed(self):
"""`defaults` aren't evaluated if the instance isn't created."""
def raise_exception():
raise AssertionError
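# lazy() defers raise_exception() until `defaults` is actually evaluated;
# since the Person already exists, it never should be.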
obj, created = Person.objects.get_or_create(
first_name="John",
defaults=lazy(raise_exception, object)(),
)
self.assertFalse(created)
class GetOrCreateTestsWithManualPKs(TestCase):
@classmethod
def setUpTestData(cls):
ManualPrimaryKeyTest.objects.create(id=1, data="Original")
def test_create_with_duplicate_primary_key(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
with self.assertRaises(IntegrityError):
ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_get_or_create_raises_IntegrityError_plus_traceback(self):
"""
get_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
We cannot use assertRaises here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn("obj.save", formatted_traceback)
def test_savepoint_rollback(self):
"""
The database connection is still usable after a DatabaseError in
get_or_create() (#20463).
"""
Tag.objects.create(text="foo")
with self.assertRaises(DatabaseError):
# pk 123456789 doesn't exist, so the tag object will be created.
# Saving triggers a unique constraint violation on 'text'.
Tag.objects.get_or_create(pk=123456789, defaults={"text": "foo"})
# Tag objects can be created after the error.
Tag.objects.create(text="bar")
def test_get_or_create_empty(self):
"""
If all the attributes on a model have defaults, get_or_create() doesn't
require any arguments.
"""
DefaultPerson.objects.get_or_create()
class GetOrCreateTransactionTests(TransactionTestCase):
available_apps = ["get_or_create"]
def test_get_or_create_integrityerror(self):
"""
Regression test for #15117. Requires a TransactionTestCase on
databases that delay integrity checks until the end of transactions,
otherwise the exception is never raised.
"""
try:
Profile.objects.get_or_create(person=Person(id=1))
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
def test_get_get_or_create(self):
tag = Tag.objects.create(text="foo")
a_thing = Thing.objects.create(name="a")
a_thing.tags.add(tag)
obj, created = a_thing.tags.get_or_create(text="foo")
self.assertFalse(created)
self.assertEqual(obj.pk, tag.pk)
def test_create_get_or_create(self):
a_thing = Thing.objects.create(name="a")
obj, created = a_thing.tags.get_or_create(text="foo")
self.assertTrue(created)
self.assertEqual(obj.text, "foo")
self.assertIn(obj, a_thing.tags.all())
def test_something(self):
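# The tag exists but isn't related to the thing, so get() misses and the
# create path violates the unique constraint on "text".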
Tag.objects.create(text="foo")
a_thing = Thing.objects.create(name="a")
with self.assertRaises(IntegrityError):
a_thing.tags.get_or_create(text="foo")
class UpdateOrCreateTests(TestCase):
def test_update(self):
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
p, created = Person.objects.update_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": date(1940, 10, 10)},
)
self.assertFalse(created)
self.assertEqual(p.first_name, "John")
self.assertEqual(p.last_name, "Lennon")
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create(self):
p, created = Person.objects.update_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": date(1940, 10, 10)},
)
self.assertTrue(created)
self.assertEqual(p.first_name, "John")
self.assertEqual(p.last_name, "Lennon")
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create_twice(self):
params = {
"first_name": "John",
"last_name": "Lennon",
"birthday": date(1940, 10, 10),
}
Person.objects.update_or_create(**params)
# If we execute the exact same statement, it won't create a Person.
p, created = Person.objects.update_or_create(**params)
self.assertFalse(created)
def test_integrity(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
with self.assertRaises(IntegrityError):
Person.objects.update_or_create(first_name="Tom", last_name="Smith")
def test_manual_primary_key_test(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
ManualPrimaryKeyTest.objects.create(id=1, data="Original")
with self.assertRaises(IntegrityError):
ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_with_pk_property(self):
"""
Using the pk property of a model is allowed.
"""
Thing.objects.update_or_create(pk=1)
def test_update_or_create_with_model_property_defaults(self):
"""Using a property with a setter implemented is allowed."""
t, _ = Thing.objects.update_or_create(
defaults={"capitalized_name_property": "annie"}, pk=1
)
self.assertEqual(t.name, "Annie")
def test_error_contains_full_traceback(self):
"""
update_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
We cannot use assertRaises/assertRaisesMessage here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn("obj.save", formatted_traceback)
def test_create_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book, created = p.books.update_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
self.assertEqual(p.books.count(), 1)
def test_update_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
self.assertEqual(p.books.count(), 1)
name = "The Book of Django"
book, created = p.books.update_or_create(defaults={"name": name}, id=book.id)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(p.books.count(), 1)
def test_create_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book, created = author.books.update_or_create(
name="The Book of Ed & Fred", publisher=p
)
self.assertTrue(created)
self.assertEqual(author.books.count(), 1)
def test_update_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
book.authors.add(author)
self.assertEqual(author.books.count(), 1)
name = "The Book of Django"
book, created = author.books.update_or_create(
defaults={"name": name}, id=book.id
)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(author.books.count(), 1)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.update_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "testing",
},
)
self.assertTrue(created)
self.assertEqual(obj.defaults, "testing")
obj, created = Person.objects.update_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "another testing",
},
)
self.assertFalse(created)
self.assertEqual(obj.defaults, "another testing")
def test_create_callable_default(self):
obj, created = Person.objects.update_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": lambda: date(1943, 2, 25)},
)
self.assertIs(created, True)
self.assertEqual(obj.birthday, date(1943, 2, 25))
def test_update_callable_default(self):
Person.objects.update_or_create(
first_name="George",
last_name="Harrison",
birthday=date(1942, 2, 25),
)
obj, created = Person.objects.update_or_create(
first_name="George",
defaults={"last_name": lambda: "NotHarrison"},
)
self.assertIs(created, False)
self.assertEqual(obj.last_name, "NotHarrison")
def test_defaults_not_evaluated_unless_needed(self):
"""`defaults` aren't evaluated if the instance isn't created."""
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
def raise_exception():
raise AssertionError
obj, created = Person.objects.get_or_create(
first_name="John",
defaults=lazy(raise_exception, object)(),
)
self.assertFalse(created)
def test_mti_update_non_local_concrete_fields(self):
journalist = Journalist.objects.create(name="Jane", specialty="Politics")
journalist, created = Journalist.objects.update_or_create(
pk=journalist.pk,
defaults={"name": "John"},
)
self.assertIs(created, False)
self.assertEqual(journalist.name, "John")
class UpdateOrCreateTestsWithManualPKs(TestCase):
def test_create_with_duplicate_primary_key(self):
"""
If an existing primary key is specified with different values for other
fields, then IntegrityError is raised and data isn't updated.
"""
ManualPrimaryKeyTest.objects.create(id=1, data="Original")
with self.assertRaises(IntegrityError):
ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
class UpdateOrCreateTransactionTests(TransactionTestCase):
available_apps = ["get_or_create"]
@skipUnlessDBFeature("has_select_for_update")
@skipUnlessDBFeature("supports_transactions")
def test_updates_in_transaction(self):
"""
Objects are selected and updated in a transaction to avoid race
conditions. This test forces update_or_create() to hold its row lock
in another thread for a relatively long time so that the main thread
can attempt an update() while the lock is held. The updated field isn't
in 'defaults', so update_or_create() shouldn't have an effect on it.
"""
lock_status = {"has_grabbed_lock": False}
def birthday_sleep():
lock_status["has_grabbed_lock"] = True
time.sleep(0.5)
return date(1940, 10, 10)
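# birthday_sleep() runs while update_or_create() holds the row lock: the
# flag signals the main thread, and the sleep keeps the lock held long
# enough for the concurrent update() below to block.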
def update_birthday_slowly():
Person.objects.update_or_create(
first_name="John", defaults={"birthday": birthday_sleep}
)
# Avoid leaking connection for Oracle
connection.close()
def lock_wait():
# timeout after ~0.5 seconds
for i in range(20):
time.sleep(0.025)
if lock_status["has_grabbed_lock"]:
return True
return False
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
# update_or_create in a separate thread
t = Thread(target=update_birthday_slowly)
before_start = datetime.now()
t.start()
if not lock_wait():
self.skipTest("Database took too long to lock the row")
# Update during lock
Person.objects.filter(first_name="John").update(last_name="NotLennon")
after_update = datetime.now()
# Wait for thread to finish
t.join()
# The update persisted, and it was blocked until the lock was released.
updated_person = Person.objects.get(first_name="John")
self.assertGreater(after_update - before_start, timedelta(seconds=0.5))
self.assertEqual(updated_person.last_name, "NotLennon")
@skipUnlessDBFeature("has_select_for_update")
@skipUnlessDBFeature("supports_transactions")
def test_creation_in_transaction(self):
"""
Objects are selected and updated in a transaction to avoid race
conditions. This test checks the behavior of update_or_create() when
the object doesn't already exist, but another thread creates the
object before update_or_create() does and then attempts to update the
object, also before update_or_create(). It forces update_or_create() to
hold the lock in another thread for a relatively long time so that it
can update while it holds the lock. The updated field isn't a field in
'defaults', so update_or_create() shouldn't have an effect on it.
"""
lock_status = {"lock_count": 0}
def birthday_sleep():
lock_status["lock_count"] += 1
time.sleep(0.5)
return date(1940, 10, 10)
def update_birthday_slowly():
try:
Person.objects.update_or_create(
first_name="John", defaults={"birthday": birthday_sleep}
)
finally:
# Avoid leaking connection for Oracle
connection.close()
def lock_wait(expected_lock_count):
# timeout after ~0.5 seconds
for i in range(20):
time.sleep(0.025)
if lock_status["lock_count"] == expected_lock_count:
return True
self.skipTest("Database took too long to lock the row")
# update_or_create in a separate thread.
t = Thread(target=update_birthday_slowly)
before_start = datetime.now()
t.start()
lock_wait(1)
# Create object *after* initial attempt by update_or_create to get obj
# but before creation attempt.
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
lock_wait(2)
# At this point, the thread is pausing for 0.5 seconds, so now attempt
# to modify object before update_or_create() calls save(). This should
# be blocked until after the save().
Person.objects.filter(first_name="John").update(last_name="NotLennon")
after_update = datetime.now()
# Wait for thread to finish
t.join()
# Check call to update_or_create() succeeded and the subsequent
# (blocked) call to update().
updated_person = Person.objects.get(first_name="John")
self.assertEqual(
updated_person.birthday, date(1940, 10, 10)
) # set by update_or_create()
self.assertEqual(updated_person.last_name, "NotLennon") # set by update()
self.assertGreater(after_update - before_start, timedelta(seconds=1))
class InvalidCreateArgumentsTests(TransactionTestCase):
available_apps = ["get_or_create"]
msg = "Invalid field name(s) for model Thing: 'nonexistent'."
bad_field_msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: id, name, tags"
)
def test_get_or_create_with_invalid_defaults(self):
with self.assertRaisesMessage(FieldError, self.msg):
Thing.objects.get_or_create(name="a", defaults={"nonexistent": "b"})
def test_get_or_create_with_invalid_kwargs(self):
with self.assertRaisesMessage(FieldError, self.bad_field_msg):
Thing.objects.get_or_create(name="a", nonexistent="b")
def test_update_or_create_with_invalid_defaults(self):
with self.assertRaisesMessage(FieldError, self.msg):
Thing.objects.update_or_create(name="a", defaults={"nonexistent": "b"})
def test_update_or_create_with_invalid_kwargs(self):
with self.assertRaisesMessage(FieldError, self.bad_field_msg):
Thing.objects.update_or_create(name="a", nonexistent="b")
def test_multiple_invalid_fields(self):
with self.assertRaisesMessage(FieldError, self.bad_field_msg):
Thing.objects.update_or_create(
name="a", nonexistent="b", defaults={"invalid": "c"}
)
def test_property_attribute_without_setter_defaults(self):
with self.assertRaisesMessage(
FieldError, "Invalid field name(s) for model Thing: 'name_in_all_caps'"
):
Thing.objects.update_or_create(
name="a", defaults={"name_in_all_caps": "FRANK"}
)
def test_property_attribute_without_setter_kwargs(self):
msg = (
"Cannot resolve keyword 'name_in_all_caps' into field. Choices are: id, "
"name, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Thing.objects.update_or_create(
name_in_all_caps="FRANK", defaults={"name": "Frank"}
)
|
04f9f53a9f5943ab2255734d7526a36e51e924b00e78e01775e2c0885591ff72 | from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=100, unique=True)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
defaults = models.TextField()
class DefaultPerson(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
class Profile(models.Model):
person = models.ForeignKey(Person, models.CASCADE, primary_key=True)
class Tag(models.Model):
text = models.CharField(max_length=255, unique=True)
class Thing(models.Model):
name = models.CharField(max_length=255)
tags = models.ManyToManyField(Tag)
@property
def capitalized_name_property(self):
return self.name
@capitalized_name_property.setter
def capitalized_name_property(self, val):
self.name = val.capitalize()
@property
def name_in_all_caps(self):
return self.name.upper()
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Journalist(Author):
specialty = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name="books")
publisher = models.ForeignKey(
Publisher,
models.CASCADE,
related_name="books",
db_column="publisher_id_column",
)
|
f2e9f0f595bbaf43ec8001c77f236964bd370022e9c8eff56490743e4624c409 | import datetime
import pickle
import sys
import unittest
from operator import attrgetter
from threading import Lock
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import CharField, Count, Exists, F, Max, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.db.models.functions import ExtractYear, Length, LTrim
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import AND, OR, NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext, ignore_warnings, register_lookup
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
FK1,
Annotation,
Article,
Author,
BaseA,
BaseUser,
Book,
CategoryItem,
CategoryRelationship,
Celebrity,
Channel,
Chapter,
Child,
ChildObjectA,
Classroom,
CommonMixedCaseForeignKeys,
Company,
Cover,
CustomPk,
CustomPkTag,
DateTimePK,
Detail,
DumbCategory,
Eaten,
Employment,
ExtraInfo,
Fan,
Food,
Identifier,
Individual,
Item,
Job,
JobResponsibilities,
Join,
LeafA,
LeafB,
LoopX,
LoopZ,
ManagedModel,
Member,
MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem,
ModelA,
ModelB,
ModelC,
ModelD,
MyObject,
NamedCategory,
Node,
Note,
NullableName,
Number,
ObjectA,
ObjectB,
ObjectC,
OneToOneCategory,
Order,
OrderItem,
Page,
Paragraph,
Person,
Plaything,
PointerA,
Program,
ProxyCategory,
ProxyObjectA,
ProxyObjectB,
Ranking,
Related,
RelatedIndividual,
RelatedObject,
Report,
ReportComment,
ReservedName,
Responsibility,
School,
SharedConnection,
SimpleCategory,
SingleObject,
SpecialCategory,
Staff,
StaffUser,
Student,
Tag,
Task,
Teacher,
Ticket21203Child,
Ticket21203Parent,
Ticket23605A,
Ticket23605B,
Ticket23605C,
TvChef,
Valid,
X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
ann2.notes.add(cls.n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(
info="e2", note=cls.n2, value=41, filterable=False
)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(
name="one",
created=cls.time1,
modified=cls.time1,
creator=cls.a1,
note=cls.n3,
)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(
name="two", created=cls.time2, creator=cls.a2, note=cls.n2
)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(
name="three", created=time3, creator=cls.a2, note=cls.n3
)
cls.i4 = Item.objects.create(
name="four", created=time4, creator=cls.a4, note=cls.n3
)
cls.i4.tags.set([cls.t4])
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
cls.c1 = Cover.objects.create(title="first", item=cls.i4)
cls.c2 = Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
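# Nested subqueries are assigned single-letter table aliases starting
# at "T", as the assertions below demonstrate.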
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"})
self.assertIn("v0", str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn("w0", str(qs4.query).lower())
# So, 'U0."id"' is referenced in SELECT and WHERE twice.
self.assertEqual(str(qs4.query).lower().count("u0."), 4)
def test_ticket1050(self):
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=True),
[self.i3],
)
self.assertSequenceEqual(
Item.objects.filter(tags__id__isnull=True),
[self.i3],
)
def test_ticket1801(self):
self.assertSequenceEqual(
Author.objects.filter(item=self.i2),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i3),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
[self.a2],
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(
Q(creator__name="fred") | Q(tags=self.t2)
),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
)
self.assertSequenceEqual(
Item.objects.filter(
Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
),
[],
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertSequenceEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
[self.i2],
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
[self.i1, self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name")[:3],
[self.i1, self.i2],
)
def test_tickets_2080_3592(self):
self.assertSequenceEqual(
Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="one") | Q(name="a3")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(name="a3") | Q(item__name="one")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
[self.a2],
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [])
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values("creator").distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name="two")
.values("creator", "name")
.distinct()
.count(),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name", "foo")
.distinct()
.count()
),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name")
.distinct()
.count()
),
4,
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values("creator", "name").count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
self.assertSequenceEqual(q2, [self.i1])
self.assertSequenceEqual(
(q1 | q2).order_by("name"),
[self.i4, self.i1, self.i3, self.i2],
)
self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertSequenceEqual(
((q1 & q2) | q3).order_by("name"),
[self.i4, self.i1],
)
def test_order_by_tables(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by("name").query
self.assertEqual(
len(
[
t
for t in combined_query.alias_map
if combined_query.alias_refcount[t]
]
),
1,
)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by("greatest_fan__fan_of")
self.assertIn("OUTER JOIN", str(qs.query))
qs = qs.order_by("id")
self.assertNotIn("OUTER JOIN", str(qs.query))
def test_order_by_related_field_transform(self):
extra_12 = ExtraInfo.objects.create(
info="extra 12",
date=DateTimePK.objects.create(date=datetime.datetime(2021, 12, 10)),
)
extra_11 = ExtraInfo.objects.create(
info="extra 11",
date=DateTimePK.objects.create(date=datetime.datetime(2022, 11, 10)),
)
self.assertSequenceEqual(
ExtraInfo.objects.filter(date__isnull=False).order_by("date__month"),
[extra_11, extra_12],
)
def test_filter_by_related_field_transform(self):
extra_old = ExtraInfo.objects.create(
info="extra 12",
date=DateTimePK.objects.create(date=datetime.datetime(2020, 12, 10)),
)
ExtraInfo.objects.create(info="extra 11", date=DateTimePK.objects.create())
a5 = Author.objects.create(name="a5", num=5005, extra=extra_old)
fk_field = ExtraInfo._meta.get_field("date")
with register_lookup(fk_field, ExtractYear):
self.assertSequenceEqual(
ExtraInfo.objects.filter(date__year=2020),
[extra_old],
)
self.assertSequenceEqual(
Author.objects.filter(extra__date__year=2020), [a5]
)
def test_filter_by_related_field_nested_transforms(self):
extra = ExtraInfo.objects.create(info=" extra")
a5 = Author.objects.create(name="a5", num=5005, extra=extra)
info_field = ExtraInfo._meta.get_field("info")
with register_lookup(info_field, Length), register_lookup(CharField, LTrim):
self.assertSequenceEqual(
Author.objects.filter(extra__info__ltrim__length=5), [a5]
)
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by("name").get(pk=self.a1.pk)
self.assertNotIn("order by", captured_queries[0]["sql"].lower())
def test_tickets_4088_4306(self):
self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
self.assertSequenceEqual(
Report.objects.filter(creator__id=self.a1.id), [self.r1]
)
self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1])
def test_ticket4510(self):
self.assertSequenceEqual(
Author.objects.filter(report__name="r1"),
[self.a1],
)
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
def test_tickets_5324_6704(self):
self.assertSequenceEqual(
Item.objects.filter(tags__name="t4"),
[self.i4],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct(),
[self.i1, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(),
[self.i2, self.i3, self.i1],
)
self.assertSequenceEqual(
Author.objects.exclude(item__name="one").distinct().order_by("name"),
[self.a2, self.a3, self.a4],
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").order_by("name"),
[self.i4, self.i3],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"),
[self.i3],
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
)
self.assertEqual(
len(
[
x
for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]
),
1,
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertSequenceEqual(
Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1]
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__isnull=True).order_by("name"),
[self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
def test_ticket2091(self):
t = Tag.objects.get(name="t4")
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = sys.getrecursionlimit() // 16
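# Nest enough subqueries to trip Django's own guard well before Python's
# recursion limit would be hit.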
msg = "Maximum recursion depth exceeded: too many subqueries."
with self.assertRaisesMessage(RecursionError, msg):
for i in range(local_recursion_limit + 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases,
{
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"AA",
"AB",
"AC",
"AD",
"AE",
"AF",
"AG",
"AH",
"AI",
"AJ",
"AK",
"AL",
"AM",
"AN",
},
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
msg = "Cannot combine queries on two different base models."
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4)
self.assertEqual(
Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4
)
def test_ticket2400(self):
self.assertSequenceEqual(
Author.objects.filter(item__isnull=True),
[self.a3],
)
self.assertSequenceEqual(
Tag.objects.filter(item__isnull=True),
[self.t5],
)
def test_ticket2496(self):
self.assertSequenceEqual(
Item.objects.extra(tables=["queries_author"])
.select_related()
.order_by("name")[:1],
[self.i4],
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"):
Note.objects.filter({"note": "n1", "misc": "foo"})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertSequenceEqual(
Item.objects.order_by("note__note", "name"),
[self.i2, self.i4, self.i1, self.i3],
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertSequenceEqual(
Author.objects.order_by("extra", "-name"),
[self.a2, self.a1, self.a4, self.a3],
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertSequenceEqual(
Item.objects.order_by("creator", "name"),
[self.i1, self.i3, self.i2, self.i4],
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=False).order_by("tags", "id"),
[self.i1, self.i2, self.i1, self.i2, self.i4],
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by("name")
self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by("note__note", "name")
self.assertQuerysetEqual(qs, [self.i2, self.i4, self.i1, self.i3])
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), "<Note: n2>")
self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>")
def test_ticket3037(self):
self.assertSequenceEqual(
Item.objects.filter(
Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four")
),
[self.i4],
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values("misc").distinct().order_by("note", "-misc"),
[{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}],
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn("note_id", ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(
ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}]
)
# ...or use the field name.
self.assertSequenceEqual(
ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}]
)
def test_ticket6154(self):
# Multiple filter() calls are always joined with AND.
self.assertSequenceEqual(
Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
),
[self.a1],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
).filter(id=self.a1.id),
[self.a1],
)
def test_ticket6981(self):
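# select_related() on a nullable ForeignKey must use an outer join so
# that rows without a parent (t1 here) aren't dropped.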
self.assertSequenceEqual(
Tag.objects.select_related("parent").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_ticket9926(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent", "category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.select_related("parent", "parent__category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes("created", "month").count(), 1)
self.assertEqual(Item.objects.datetimes("created", "day").count(), 2)
self.assertEqual(len(Item.objects.datetimes("created", "day")), 2)
self.assertEqual(
Item.objects.datetimes("created", "day")[0],
datetime.datetime(2007, 12, 19, 0, 0),
)
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(select={"a": 1}),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
self.assertSequenceEqual(
Item.objects.extra(select={"a": 1}).datetimes("created", "day"),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
name = "one"
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(
where=["name=%s"], params=[name]
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
self.assertSequenceEqual(
Item.objects.extra(where=["name=%s"], params=[name]).datetimes(
"created", "day"
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_ticket7155(self):
# Nullable dates
self.assertSequenceEqual(
Item.objects.datetimes("modified", "day"),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_order_by_rawsql(self):
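# RawSQL expressions are accepted directly as order_by() arguments.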
self.assertSequenceEqual(
Item.objects.values("note__note").order_by(
RawSQL("queries_note.note", ()),
"id",
),
[
{"note__note": "n2"},
{"note__note": "n3"},
{"note__note": "n3"},
{"note__note": "n3"},
],
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertSequenceEqual(
Tag.objects.filter(parent=self.t1, name="t3").order_by("name"),
[self.t3],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"),
[self.t1, self.t2, self.t4, self.t5],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1", name="one")
.order_by("name")
.distinct(),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(name__in=["three", "four"])
.exclude(tags__name="t1")
.order_by("name"),
[self.i4, self.i3],
)
# More twisted cases, involving nested negations.
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one")),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"),
[self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"),
[self.i4, self.i1, self.i3],
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer("name", "creator")
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertSequenceEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5)
| Q(tag__children=self.t5)
| Q(tag__children__children=self.t5)
),
[self.ann1],
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertSequenceEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
[self.i1, self.i2],
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal="m")
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal="m"), [])
self.assertQuerysetEqual(q.exclude(meal="m"), [])
self.assertQuerysetEqual(q.complex_filter({"pk": 1}), [])
self.assertQuerysetEqual(q.select_related("food"), [])
self.assertQuerysetEqual(q.annotate(Count("food")), [])
self.assertQuerysetEqual(q.order_by("meal", "food"), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(q.extra(select={"foo": "1"}), [])
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
q.extra(select={"foo": "1"})
self.assertQuerysetEqual(q.defer("meal"), [])
self.assertQuerysetEqual(q.only("meal"), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes("created", "month")
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertSequenceEqual(
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name")
),
[self.t2, self.t3],
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name", "id")
)
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values_list("name", "id")
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{"id": 1}, {"id": 2}, {"id": 3}],
)
self.assertSequenceEqual(
Annotation.objects.filter(
notes__in=Note.objects.filter(note="n1")
.values_list("note")
.values("id")
),
[self.ann1],
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue
# properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
# Querysets used in an __in clause are embedded as subqueries and not
# executed separately.
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertSequenceEqual(qs, [self.a1, self.a2])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertSequenceEqual(qs, [self.a3, self.a4])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertSequenceEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name="a1")),
[self.a1],
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertSequenceEqual(
Item.objects.exclude(modified=self.time1).order_by("name"),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__name=self.t1.name),
[self.t1, self.t4, self.t5],
)
def test_ticket7181(self):
# Ordering by related tables should accommodate nullable fields (this
# test is a little tricky, since NULL ordering is database-dependent;
# instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by("parent__name")), 5)
# Empty querysets can be merged with others.
self.assertSequenceEqual(
Note.objects.none() | Note.objects.all(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.all() | Note.objects.none(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertSequenceEqual(
Author.objects.filter(
Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz")
),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2)
),
[self.a2],
)
self.assertSequenceEqual(
Annotation.objects.filter(
Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1")
),
[self.ann1],
)
xx = ExtraInfo.objects.create(info="xx", note=self.n3)
self.assertSequenceEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
[self.n1, self.n3],
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len(
[
x
for x in q.alias_map.values()
if x.join_type == LOUTER and q.alias_refcount[x.table_alias]
]
),
1,
)
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertCountEqual(
Tag.objects.all(),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
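# exclude(cond) should be equivalent to filter(~Q(cond)), including when
# the condition spans multi-valued relations.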
self.assertQuerysetEqual(
Item.objects.exclude(tags__name="t4"),
Item.objects.filter(~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))),
)
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~Q(~Q(tags__name="t4"))),
)
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~Q(tags__name__in=["t4", "t3"])),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])),
)
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
# It should not create an INNER JOIN either.
q = Tag.objects.filter(parent__isnull=True)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertSequenceEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertSequenceEqual(q, [self.nc1])
def test_ticket_10790_4(self):
# Querying across an m2m field should not strip the m2m table from the join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertSequenceEqual(q, [self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2)
self.assertNotIn("INNER JOIN", str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across an m2m field should not create outer joins.
q = Author.objects.filter(item__tags__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across an m2m field should not create inner
# joins, and should strip the last outer join.
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertSequenceEqual(
q,
[self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertSequenceEqual(q, [self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q1 & q2
self.assertSequenceEqual(q3, [])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
def test_ticket19672(self):
self.assertSequenceEqual(
Report.objects.filter(
Q(creator__isnull=False) & ~Q(creator__extra__value=41)
),
[self.r1],
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count("item"))
qs = qs.filter(~Q(extra__value=0)).order_by("name")
self.assertIn("SELECT", str(qs.query))
self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name="generic")
def test_common_mixed_case_foreign_keys(self):
"""
A valid query should be generated when fields fetched from joined tables
include FKs whose names differ only by case.
"""
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
c3 = SimpleCategory.objects.create(name="c3")
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
CaTeGoRy=c2
)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
category=c3
)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
"category",
"mixed_case_field_category",
"mixed_case_db_column_category",
"category__category",
"mixed_case_field_category__CaTeGoRy",
"mixed_case_db_column_category__category",
)
self.assertTrue(qs.first())
def test_excluded_intermediary_m2m_table_joined(self):
self.assertSequenceEqual(
Note.objects.filter(~Q(tag__annotation__name=F("note"))),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.filter(tag__annotation__name="a1").filter(
~Q(tag__annotation__name=F("note"))
),
[],
)
def test_field_with_filterable(self):
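# ExtraInfo defines a model field named "filterable"; using an instance
# as a filter value must not trip the expression "filterable" check.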
self.assertSequenceEqual(
Author.objects.filter(extra=self.e2),
[self.a3, self.a4],
)
def test_negate_field(self):
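# Note has a field literally named "negate"; filtering on it must work
# as a plain field lookup, not as negation.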
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.num4 = Number.objects.create(num=4)
cls.num8 = Number.objects.create(num=8)
cls.num12 = Number.objects.create(num=12)
def test_ticket4289(self):
# A slight variation on restricting the filtering choices via lookup
# constraints.
self.assertSequenceEqual(Number.objects.filter(num__lt=4), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertSequenceEqual(
Number.objects.filter(num__gt=8, num__lt=13),
[self.num12],
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
[self.num8],
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertSequenceEqual(
Number.objects.filter(num__gt=11.9),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gt=12), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), [])
self.assertCountEqual(
Number.objects.filter(num__lt=12),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.0),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=11.9),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12.0),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), [])
self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), [])
self.assertCountEqual(
Number.objects.filter(num__lte=11.9),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.0),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.9),
[self.num4, self.num8, self.num12],
)
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_datetimes_invalid_field(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
msg = "'name' isn't a DateField, TimeField, or DateTimeField."
with self.assertRaisesMessage(TypeError, msg):
Item.objects.datetimes("name", "month")
def test_ticket22023(self):
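# only() and defer() may not be called after values() or values_list().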
with self.assertRaisesMessage(
TypeError, "Cannot call only() after .values() or .values_list()"
):
Valid.objects.values().only()
with self.assertRaisesMessage(
TypeError, "Cannot call defer() after .values() or .values_list()"
):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
n1 = Note.objects.create(note="n1", misc="foo")
n2 = Note.objects.create(note="n2", misc="bar")
e1 = ExtraInfo.objects.create(info="e1", note=n1)
e2 = ExtraInfo.objects.create(info="e2", note=n2)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
cls.i1 = Item.objects.create(
name="i1", created=datetime.datetime.now(), note=n1, creator=cls.a1
)
cls.i2 = Item.objects.create(
name="i2", created=datetime.datetime.now(), note=n1, creator=cls.a3
)
def test_ticket24525(self):
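# Intersecting (&) related-manager querysets with an exclude() across m2m.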
tag = Tag.objects.create()
anth100 = tag.note_set.create(note="ANTH", misc="100")
math101 = tag.note_set.create(note="MATH", misc="101")
s1 = tag.annotation_set.create(name="1")
s2 = tag.annotation_set.create(name="2")
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(
notes__in=[anth100]
)
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = (
"Unsaved model instance <NamedCategory: Other> cannot be used in an ORM "
"query."
)
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
# Note: when combining the queries we need to have information available
# about the join type of the trimmed "creator__isnull" join. If we
# don't have that information, then the join is created as an INNER JOIN
# and results will be incorrect.
q1 = Report.objects.filter(
Q(creator__isnull=True) | Q(creator__extra__info="e1")
)
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(
Q(creator__extra__info="e1")
)
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(
Q(creator__extra__info="e1") | Q(creator__isnull=True)
)
q2 = Report.objects.filter(
Q(creator__extra__info="e1")
) | Report.objects.filter(Q(creator__isnull=True))
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator=self.a1) | Q(creator__report__name="r1")
).order_by()
q2 = (
Item.objects.filter(Q(creator=self.a1)).order_by()
| Item.objects.filter(Q(creator__report__name="r1")).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator__report__name="e1") | Q(creator=self.a1)
).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name="e1")).order_by()
| Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
# Joins having identical connections are correctly recreated in the rhs
# query when the querysets are ORed together (#18748).
Report.objects.create(name="r4", creator=self.a1)
q1 = Author.objects.filter(report__name="r5")
q2 = Author.objects.filter(report__name="r4").filter(report__name="r1")
combined = q1 | q2
self.assertEqual(str(combined.query).count("JOIN"), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, "a1")
def test_combine_or_filter_reuse(self):
combined = Author.objects.filter(name="a1") | Author.objects.filter(name="a3")
self.assertEqual(combined.get(name="a1"), self.a1)
def test_join_reuse_order(self):
# Join aliases are reused in order. Combining these querysets used to
# raise an AssertionError because change_map contained a circular
# reference (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data="mm1", tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data="mm"), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ""
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by(
"name"
),
["e1", "e2", expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertSequenceEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
[self.r1, self.r2, self.r3],
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, "d2")
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by("name")
self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
# It is possible to order by the reverse of a foreign key, although that
# can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(
SimpleCategory.objects.order_by("categoryitem", "pk"), [c1, c2, c1]
)
def test_filter_reverse_non_integer_pk(self):
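# Filtering through a reverse relation works when the target's primary
# key is non-integer (a datetime-based DateTimePK here).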
date_obj = DateTimePK.objects.create()
extra_obj = ExtraInfo.objects.create(info="extra", date=date_obj)
self.assertEqual(
DateTimePK.objects.filter(extrainfo=extra_obj).get(),
date_obj,
)
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), []
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertCountEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(
category__onetoonecategory__isnull=False
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(
category__onetoonecategory__isnull=True
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
# Ordering by 'rank' gives us rank1, rank2, rank3. Ordering by the
# Meta.ordering gives rank3, rank2, rank1.
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
e2 = ExtraInfo.objects.create(info="e2", note=cls.n2)
a1 = Author.objects.create(name="a1", num=1001, extra=e1)
a2 = Author.objects.create(name="a2", num=2002, extra=e1)
a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.rank2 = Ranking.objects.create(rank=2, author=a2)
cls.rank1 = Ranking.objects.create(rank=1, author=a3)
cls.rank3 = Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
self.assertSequenceEqual(
Ranking.objects.order_by("rank"),
[self.rank1, self.rank2, self.rank3],
)
# Ordering by extra() pieces is possible, too, and you can mix extra
# fields and model fields in the ordering.
self.assertSequenceEqual(
Ranking.objects.extra(
tables=["django_site"], order_by=["-django_site.id", "rank"]
),
[self.rank1, self.rank2, self.rank3],
)
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
self.assertEqual(
[o.good for o in qs.extra(order_by=("-good",))], [True, False, False]
)
self.assertSequenceEqual(
qs.extra(order_by=("-good", "id")),
[self.rank3, self.rank2, self.rank1],
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values("id", "rank").order_by("id")
self.assertEqual([d["rank"] for d in dicts], [2, 1, 3])
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
dicts = qs.values().order_by("id")
for d in dicts:
del d["id"]
del d["author_id"]
self.assertEqual(
[sorted(d.items()) for d in dicts],
[
[("good", 0), ("rank", 2)],
[("good", 0), ("rank", 1)],
[("good", 1), ("rank", 3)],
],
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=["django_site"])
qs.query.get_compiler(qs.db).as_sql()
# The test passes if this second call doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name="a1")
ranking_start = Ranking.objects.get(author__name="a1")
# Make sure that the IDs from different tables don't happen to match.
self.assertSequenceEqual(
Ranking.objects.filter(author__name="a1"),
[self.rank3],
)
self.assertEqual(Ranking.objects.filter(author__name="a1").update(rank=4636), 1)
r = Ranking.objects.get(author__name="a1")
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertSequenceEqual(
Note.objects.exclude(Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q() | ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() & ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() ^ ~Q()),
[self.n1, self.n2],
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape to a literal %s in extra() select clauses.
self.assertEqual(Note.objects.extra(select={"foo": "'%%s'"})[0].foo, "%s")
self.assertEqual(
Note.objects.extra(select={"foo": "'%%s bar %%s'"})[0].foo, "%s bar %s"
)
self.assertEqual(
Note.objects.extra(select={"foo": "'bar %%s'"})[0].foo, "bar %s"
)
def test_extra_select_alias_sql_injection(self):
crafted_alias = """injected_name" from "queries_note"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Note.objects.extra(select={crafted_alias: "1"})
def test_queryset_reuse(self):
# Using querysets doesn't mutate aliases.
authors = Author.objects.filter(Q(name="a1") | Q(name="nonexistent"))
self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3)
self.assertEqual(authors.count(), 1)
def test_filter_unsaved_object(self):
# These tests will catch ValueError in Django 5.0 when passing unsaved
# model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
msg = "Passing unsaved model instances to related filters is deprecated."
company = Company.objects.create(name="Django")
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.filter(staff=Staff(name="unsaved"))
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# related to a model that related back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by("custom"), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by a model related via nullable relations(!) should use outer
# joins, so that all results are included.
p1 = Plaything.objects.create(name="p1")
self.assertSequenceEqual(Plaything.objects.all(), [p1])
def test_join_already_in_query(self):
# Ordering by a model related via nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name="s")
r = RelatedObject.objects.create(single=s, f=1)
p2 = Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
self.assertNotIn("JOIN", str(qs.query))
qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
self.assertIn("INNER", str(qs.query))
qs = qs.order_by("others__single__name")
# The ordering by others__single__name will add one new join (to single)
# and that join must be a LEFT join. The already existing join to related
# objects must be kept INNER. So, we have both an INNER and a LEFT join
# in the query.
self.assertEqual(str(qs.query).count("LEFT"), 1)
self.assertEqual(str(qs.query).count("INNER"), 1)
self.assertSequenceEqual(qs, [p2])
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
l1 = LeafA.objects.create(data="first")
self.assertSequenceEqual(LeafA.objects.all(), [l1])
self.assertSequenceEqual(
LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
[l1],
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(note=self.n1)
| ExtraInfo.objects.filter(info="e2")
).filter(note=self.n1),
[self.e1],
)
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(info="e2")
| ExtraInfo.objects.filter(note=self.n1)
).filter(note=self.n1),
[self.e1],
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(n1)
cls.ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), "<Tag: t1>")
self.assertEqual(repr(next(i1)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t1>")
self.assertEqual(repr(next(i2)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t3>")
self.assertEqual(repr(next(i1)), "<Tag: t3>")
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
# Nested queries should not evaluate the inner query while constructing
# the SQL; we should see a nested query here, indicated by two
# occurrences of "SELECT" in the generated SQL.
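# Illustrative shape only (the exact SQL varies by backend): one outer
# SELECT over Annotation, with the Note filter embedded as a second,
# nested SELECT rather than executed up front.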
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertSequenceEqual(
PointerA.objects.filter(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
PointerA.objects.exclude(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
Tag.objects.exclude(children=None),
[self.t1, self.t3],
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertSequenceEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
[self.t1, self.t4, self.t5],
)
# The annotation->tag link is single-valued and the tag->children link is
# multi-valued. So we have to split the exclude filter in the middle and
# then optimize the inner query without losing results.
self.assertSequenceEqual(
Annotation.objects.exclude(tag__children__name="t2"),
[self.ann2],
)
# Nested queries are possible (although they should be used with care,
# since they have performance problems on backends like MySQL).
self.assertSequenceEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
[self.ann1],
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by("name")
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name="foo")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
self.assertEqual(
Tag.objects.distinct().order_by("category__name")[:3].count(), 3
)
def test_multiple_columns_with_the_same_name_slice(self):
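# Sliced values_list()/select_related() queries must keep columns aligned
# even when joined tables contribute identically named columns.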
self.assertEqual(
list(
Tag.objects.order_by("name").values_list("name", "category__name")[:2]
),
[("t1", "Generic"), ("t2", "Generic")],
)
self.assertSequenceEqual(
Tag.objects.order_by("name").select_related("category")[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by("-name").values_list("name", "parent__name")[:2]),
[("t5", "t3"), ("t4", "t3")],
)
self.assertSequenceEqual(
Tag.objects.order_by("-name").select_related("parent")[:2],
[self.t5, self.t4],
)
def test_col_alias_quoted(self):
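# The alias generated for the subquery column must be quoted in the SQL.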
with CaptureQueriesContext(connection) as captured_queries:
self.assertEqual(
Tag.objects.values("parent")
.annotate(
tag_per_parent=Count("pk"),
)
.aggregate(Max("tag_per_parent")),
{"tag_per_parent__max": 2},
)
sql = captured_queries[0]["sql"]
self.assertIn("AS %s" % connection.ops.quote_name("col1"), sql)
def test_xor_subquery(self):
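# XOR of two Exists() conditions matches rows satisfying exactly one of
# them: t2 matches only the parent condition, t3 matches both.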
self.assertSequenceEqual(
Tag.objects.filter(
Exists(Tag.objects.filter(id=OuterRef("id"), name="t3"))
^ Exists(Tag.objects.filter(id=OuterRef("id"), parent=self.t1))
),
[self.t2],
)
class RawQueriesTests(TestCase):
@classmethod
def setUpTestData(cls):
Note.objects.create(note="n1", misc="foo", id=1)
def test_ticket14729(self):
# Test the representation of a raw query with parameters passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ["n1"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"
)
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ["n1", "foo"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs),
"<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>",
)
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty iterator as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
item_ab = Item.objects.create(
name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
item_xy = Item.objects.create(
name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="x%Y"),
[item_xy],
)
self.assertSequenceEqual(
Item.objects.filter(name__istartswith="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iendswith="A_b"),
[item_ab],
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
# OK, the exists() query worked, but did it include too many columns?
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"]
id, name = connection.ops.quote_name("id"), connection.ops.quote_name("name")
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_distinct_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct().exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(connection.ops.quote_name("id"), captured_sql)
self.assertNotIn(connection.ops.quote_name("name"), captured_sql)
def test_sliced_distinct_exists(self):
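# With a slice, the select columns remain in the SQL (contrast with
# test_distinct_exists above).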
with CaptureQueriesContext(connection) as captured_queries:
self.assertIs(Article.objects.distinct()[1:3].exists(), False)
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertIn(connection.ops.quote_name("id"), captured_sql)
self.assertIn(connection.ops.quote_name("name"), captured_sql)
def test_ticket_18414(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_18414_distinct_on(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct("name").exists())
self.assertTrue(Article.objects.distinct("name")[1:2].exists())
self.assertFalse(Article.objects.distinct("name")[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the QuerySet.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.order_by("id").ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count("notes"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("num_notes").ordered, True)
def test_annotated_default_ordering(self):
qs = Tag.objects.annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
def test_annotated_values_default_ordering(self):
qs = Tag.objects.values("name").annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name="first")
NamedCategory.objects.create(id=2, name="second")
NamedCategory.objects.create(id=3, name="third")
NamedCategory.objects.create(id=4, name="fourth")
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)
self.assertEqual(set(query.values_list("id", flat=True)), {1, 2})
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:3]
)[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related object constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
t2 = Tag.objects.create(name="t2", category=generic)
ManagedModel.objects.create(data="mm1", tag=t1, public=True)
mm2 = ManagedModel.objects.create(data="mm2", tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by("-id")[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:1]
).delete()
self.assertEqual(
set(DumbCategory.objects.values_list("id", flat=True)), {1, 2, 3}
)
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {1, 3})
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by("name")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["first", "fourth"],
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct()
.order_by("-name")
.values("id")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["second", "third"],
)
# Annotated value.
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(double_id=F("id") * 2)
.order_by("id")
.distinct()
.values("double_id")[0:2],
)
.order_by("id")
.values_list("id", flat=True),
[2, 4],
)
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.school = School.objects.create()
cls.room_1 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 1"
)
cls.room_2 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 2"
)
cls.room_3 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 3"
)
cls.room_4 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 4"
)
tag = Tag.objects.create()
cls.annotation_1 = Annotation.objects.create(tag=tag)
annotation_2 = Annotation.objects.create(tag=tag)
note = cls.annotation_1.notes.create(tag=tag)
cls.base_user_1 = BaseUser.objects.create(annotation=cls.annotation_1)
cls.base_user_2 = BaseUser.objects.create(annotation=annotation_2)
cls.task = Task.objects.create(
owner=cls.base_user_2,
creator=cls.base_user_2,
note=note,
)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_3, self.room_4])
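    # A sliced queryset can't be combined through WHERE conditions directly,
    # so for the |/^ tests above the ORM first rewrites each sliced operand
    # as a pk__in filter over its own sliced subquery, which is why the
    # allow_sliced_subqueries_with_in feature is required. Schematically:
    #
    #     qs1[:1] | qs2
    #     ==> WHERE pk IN (SELECT pk FROM ... LIMIT 1) OR <qs2 condition>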
def test_subquery_aliases(self):
combined = School.objects.filter(pk__isnull=False) & School.objects.filter(
Exists(
Classroom.objects.filter(
has_blackboard=True,
school=OuterRef("pk"),
)
),
)
self.assertSequenceEqual(combined, [self.school])
nested_combined = School.objects.filter(pk__in=combined.values("pk"))
self.assertSequenceEqual(nested_combined, [self.school])
def test_conflicting_aliases_during_combine(self):
qs1 = self.annotation_1.baseuser_set.all()
qs2 = BaseUser.objects.filter(
Q(owner__note__in=self.annotation_1.notes.all())
| Q(creator__note__in=self.annotation_1.notes.all())
)
self.assertSequenceEqual(qs1, [self.base_user_1])
self.assertSequenceEqual(qs2, [self.base_user_2])
self.assertCountEqual(qs2 | qs1, qs1 | qs2)
self.assertCountEqual(qs2 | qs1, [self.base_user_1, self.base_user_2])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"""
If a queryset is already evaluated, it can still be used as a query arg.
"""
n = Note(note="Test1", misc="misc")
n.save()
e = ExtraInfo(info="good", note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Make one of the cached results unpicklable.
n_list._result_cache[0].lock = Lock()
with self.assertRaises(TypeError):
pickle.dumps(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good")
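        # The Lock above proves the point of this test: combining uses the
        # queryset's underlying Query (a SQL subquery), never its
        # _result_cache, so the unpicklable cached instance is never copied
        # or serialized.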
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model options shouldn't be cloned."
)
try:
Note.objects.filter(pk__lte=F("pk") + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model fields shouldn't be cloned"
)
try:
Note.objects.filter(note=F("misc")).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values("num").order_by("num"), [])
def test_values_subquery(self):
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")), []
)
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), []
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
        # Tests for ticket #14930.
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(1, 2),
)
qs = qs.order_by("value_minus_x")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_twice(self):
        # Tests for ticket #14930.
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"}
)
qs = qs.order_by("value_minus_one").order_by("value_plus_one")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_multiple(self):
        # PostgreSQL doesn't allow constants in ORDER BY, so check for that.
qs = Number.objects.extra(
select={
"value_plus_one": "num+1",
"value_minus_one": "num-1",
"constant_value": "1",
}
)
qs = qs.order_by("value_plus_one", "value_minus_one", "constant_value")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_in_extra(self):
        # Tests for ticket #14930.
        qs = Number.objects.extra(
            select={"value_plus_one": "num+1", "value_minus_one": "num-1"},
            order_by=["value_minus_one"],
        )
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_select_params_values_order_in_extra(self):
        # Tests for ticket #23259.
qs = Number.objects.extra(
select={"value_plus_x": "num+%s"},
select_params=[1],
order_by=["value_plus_x"],
)
qs = qs.filter(num=72)
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_multiple_select_params_values_order_by(self):
        # Tests for ticket #23259.
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(72, 72),
)
qs = qs.order_by("value_minus_x")
qs = qs.filter(num=1)
qs = qs.values("num")
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
        # Tests for ticket #14930.
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num")
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
        # Tests for ticket #14930.
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = (
"Cannot resolve keyword %r into field. Join on 'name' not permitted."
% "foo"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list("name__foo")
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list("num", flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={"1": "num+1"}).values_list(
"1", named=True
).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list("num", "num2", named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(values._fields, ("num", "num2"))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(
values._fields,
("num2", "id", "num", "other_num", "another_num", "id__count"),
)
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count("id")
values = (
Number.objects.annotate(id__count1=expr)
.values_list(expr, "id__count1", named=True)
.first()
)
self.assertEqual(values._fields, ("id__count2", "id__count1"))
def test_named_values_list_expression(self):
expr = F("num") + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(
expr, "combinedexpression1", named=True
)
values = qs.first()
self.assertEqual(values._fields, ("combinedexpression2", "combinedexpression1"))
def test_named_values_pickle(self):
value = Number.objects.values_list("num", "other_num", named=True).get()
self.assertEqual(value, (72, None))
self.assertEqual(pickle.loads(pickle.dumps(value)), value)
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
cls.articles = [
Article.objects.create(name=f"Article {i}", created=some_date)
for i in range(1, 8)
]
def get_ordered_articles(self):
return Article.objects.order_by("name")
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, "Article 1")
self.assertSequenceEqual(
self.get_ordered_articles()[1:3],
[self.articles[1], self.articles[2]],
)
def test_slicing_with_steps_can_be_used(self):
self.assertSequenceEqual(
self.get_ordered_articles()[::2],
[
self.articles[0],
self.articles[2],
self.articles[4],
self.articles[6],
],
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_steps_is_not_lazy(self):
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][0:2],
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][4:], [self.articles[4]]
)
self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertSequenceEqual(
self.get_ordered_articles()[2:][0:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][2:3], [self.articles[4]]
)
# Using an offset without a limit is also possible.
self.assertSequenceEqual(
self.get_ordered_articles()[5:],
[self.articles[5], self.articles[6]],
)
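        # Re-slicing composes offsets and limits arithmetically rather than
        # issuing nested queries: qs[2:][0:2] becomes OFFSET 2 LIMIT 2 (rows
        # 2..3), and qs[0:5][4:] becomes OFFSET 4 LIMIT 1 because the inner
        # window is clamped by the outer one.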
def test_slicing_cannot_filter_queryset_once_sliced(self):
msg = "Cannot filter a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
msg = "Cannot reorder a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].order_by("id")
def test_slicing_cannot_combine_queries_once_sliced(self):
msg = "Cannot combine queries once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[0:-5]
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1:]
def test_invalid_index(self):
msg = "QuerySet indices must be integers or slices, not str."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()["foo"]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact="Article 1")), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact="Article 1")
s2 = Article.objects.filter(name__exact="Article 2")
self.assertSequenceEqual(
(s1 | s2).order_by("name"),
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
Article.objects.create(name="three", created=datetime.datetime.now())
Article.objects.create(name="four", created=datetime.datetime.now())
food = Food.objects.create(name="spam")
Eaten.objects.create(meal="spam with eggs", food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[:0].latest("created")
def test_empty_resultset_sql(self):
# ticket #12192
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(
Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0
)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(
Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1
)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
r_a = ReservedName.objects.create(name="a", order=42)
r_b = ReservedName.objects.create(name="b", order=37)
self.assertSequenceEqual(
ReservedName.objects.order_by("order"),
[r_b, r_a],
)
self.assertSequenceEqual(
ReservedName.objects.extra(
select={"stuff": "name"}, order_by=("order", "stuff")
),
[r_b, r_a],
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
{lunch},
)
self.assertEqual(
set(
Eaten.objects.filter(
food__in=Food.objects.filter(name="apple").values("eaten__meal")
)
),
set(),
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
{apple},
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])), {apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(node=node2)), [node1])
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name="pk")
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(
Related.objects.filter(custom__isnull=False), [notnull]
)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
e1 = Eaten.objects.create(food=apple, meal="lunch")
e2 = Eaten.objects.create(meal="lunch")
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=False),
[e1],
)
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=True),
[e2],
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
Tag.objects.create(name="t2", parent=t1, category=generic)
t3 = Tag.objects.create(name="t3", parent=t1)
Tag.objects.create(name="t4", parent=t3)
Tag.objects.create(name="t5", parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by("parent")), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(LoopX.objects.order_by("y__x__y__x__id"), [])
    # When grouping without specifying an ordering, an explicit
    # "ORDER BY NULL" clause is added on MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping")
def test_null_ordering_added(self):
query = Tag.objects.values_list("parent_id", flat=True).order_by().query
query.group_by = ["parent_id"]
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
        # The number of values is chosen to force three separate IN batches
        # on Oracle while staying below MSSQL's 2100-parameter limit.
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(
Number.objects.filter(num__in=numbers[:number]).count(), number
)
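    # Backends with a bounded IN-clause size split long value lists into
    # batches that are ORed together; e.g. Oracle's 1000-expression limit
    # turns the 2050 values above into roughly:
    #
    #     WHERE num IN (0, ..., 999)
    #        OR num IN (1000, ..., 1999)
    #        OR num IN (2000, ..., 2049)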
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ["one", "two", "three"]
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [
("un", 1, objectas[0]),
("deux", 2, objectas[0]),
("trois", 3, objectas[2]),
]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
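    # The identity under test: for any Q objects Q1 and Q2,
    # filter(Q1) | filter(Q2) must return the same set of rows as
    # filter(Q1 | Q2), regardless of operand order, even when Q1 and Q2
    # traverse different multi-valued relations.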
def test_A_AB(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux", objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name="deux")
Q2 = Q(objectc__objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__name="ein")
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__objectb__name="trois")
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name="one", objectc__objecta__name="two")
Q2 = Q(
objecta__objectc__name="ein",
objectc__objecta__name="three",
objecta__objectb__name="trois",
)
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
        Can create an instance of a model with only the PK field (#17056).
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name="apples")
cls.f2 = Food.objects.create(name="oranges")
Eaten.objects.create(food=f1, meal="dinner")
cls.j1 = Job.objects.create(name="Manager")
cls.r1 = Responsibility.objects.create(description="Playing golf")
cls.j2 = Job.objects.create(name="Programmer")
cls.r2 = Responsibility.objects.create(description="Programming")
JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1)
JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2)
def test_to_field(self):
self.assertSequenceEqual(
Food.objects.exclude(eaten__meal="dinner"),
[self.f2],
)
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Playing golf"),
[self.j2],
)
self.assertSequenceEqual(
Responsibility.objects.exclude(jobs__name="Manager"),
[self.r2],
)
def test_ticket14511(self):
alex = Person.objects.get_or_create(name="Alex")[0]
jane = Person.objects.get_or_create(name="Jane")[0]
oracle = Company.objects.get_or_create(name="Oracle")[0]
google = Company.objects.get_or_create(name="Google")[0]
microsoft = Company.objects.get_or_create(name="Microsoft")[0]
intel = Company.objects.get_or_create(name="Intel")[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(
employee=employee, employer=employer, title=title
)
employ(oracle, alex, "Engineer")
employ(oracle, alex, "Developer")
employ(google, alex, "Engineer")
employ(google, alex, "Manager")
employ(microsoft, alex, "Manager")
employ(intel, alex, "Manager")
employ(microsoft, jane, "Developer")
employ(intel, jane, "Manager")
alex_tech_employers = (
alex.employers.filter(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = (
alex.employers.exclude(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
def test_exclude_reverse_fk_field_ref(self):
tag = Tag.objects.create()
Note.objects.create(tag=tag, note="note")
annotation = Annotation.objects.create(name="annotation", tag=tag)
self.assertEqual(
Annotation.objects.exclude(tag__note__note=F("name")).get(), annotation
)
def test_exclude_with_circular_fk_relation(self):
self.assertEqual(
ObjectB.objects.exclude(objecta__objectb__name=F("name")).count(), 0
)
def test_subquery_exclude_outerref(self):
qs = JobResponsibilities.objects.filter(
Exists(Responsibility.objects.exclude(jobs=OuterRef("job"))),
)
self.assertTrue(qs.exists())
self.r1.delete()
self.assertFalse(qs.exists())
def test_exclude_nullable_fields(self):
number = Number.objects.create(num=1, other_num=1)
Number.objects.create(num=2, other_num=2, another_num=2)
self.assertSequenceEqual(
Number.objects.exclude(other_num=F("another_num")),
[number],
)
self.assertSequenceEqual(
Number.objects.exclude(num=F("another_num")),
[number],
)
def test_exclude_multivalued_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Programming"),
[self.j1],
)
self.assertIn("exists", captured_queries[0]["sql"].lower())
def test_exclude_subquery(self):
subquery = JobResponsibilities.objects.filter(
responsibility__description="bar",
) | JobResponsibilities.objects.exclude(
job__responsibilities__description="foo",
)
self.assertCountEqual(
Job.objects.annotate(
                responsibility=subquery.filter(
                    job=OuterRef("name"),
                ).values("id")[:1]
),
[self.j1, self.j2],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_exclude_unsaved_o2o_object(self):
jack = Staff.objects.create(name="jack")
jack_staff = StaffUser.objects.create(staff=jack)
unsaved_object = Staff(name="jane")
self.assertIsNone(unsaved_object.pk)
self.assertSequenceEqual(
StaffUser.objects.exclude(staff=unsaved_object), [jack_staff]
)
def test_exclude_unsaved_object(self):
# These tests will catch ValueError in Django 5.0 when passing unsaved
# model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
company = Company.objects.create(name="Django")
msg = "Passing unsaved model instances to related filters is deprecated."
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.exclude(staff=Staff(name="unsaved"))
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1),
[self.o3],
)
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1).distinct(),
[self.o3],
)
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
[self.o3],
)
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)),
[self.o3],
)
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        those orders that have no items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
[self.o1],
)
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F("second__onetoonecategory")
).get(),
rel,
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name="i1")
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
["i1", none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i1"]),
[none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i3"]),
["i1", none_val],
attrgetter("name"),
)
inner_qs = NullableName.objects.filter(name="i1").values_list("name")
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val],
attrgetter("name"),
)
        # The inner queryset wasn't executed; it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]), ["i1"], attrgetter("name")
)
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name="i1"))),
list(NullableName.objects.filter(Q(name="i1"))),
)
self.assertNotIn(
"IS NOT NULL", str(NullableName.objects.filter(~~Q(name="i1")).query)
)
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name="")
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_21001(self):
foo = NamedCategory.objects.create(name="foo")
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=""), [foo.pk], attrgetter("pk")
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
def as_sql(self, compiler, connection):
return "dummy", []
class MockCompiler:
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ("(dummy AND dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy AND dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector=OR)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("(dummy OR dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy OR dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("dummy", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy)", []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = OR
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=OR)
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector=AND)
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
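    # Summary of the matrix exercised above: a child that can match nothing
    # (NothingNode) raises EmptyResultSet, while an empty WhereNode matches
    # everything and compiles to ("", []). Under AND, one match-nothing child
    # empties the whole node; under OR it is simply dropped. negate() swaps
    # the match-nothing and match-everything outcomes.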
class QuerySetExceptionTests(SimpleTestCase):
def test_invalid_order_by(self):
msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name"
with self.assertRaisesMessage(FieldError, msg):
Article.objects.order_by("*")
def test_invalid_order_by_raw_column_alias(self):
msg = (
"Cannot resolve keyword 'queries_author.name' into field. Choices "
"are: cover, created, creator, creator_id, id, modified, name, "
"note, note_id, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Item.objects.values("creator__name").order_by("queries_author.name")
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name="foo")
d2 = ModelD.objects.create(name="bar")
cls.a1 = ModelA.objects.create(name="a1", d=cls.d1)
c = ModelC.objects.create(name="c")
b = ModelB.objects.create(name="b", c=c)
cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2)
def test_ticket_17886(self):
        # The first Q object generates the match; the remaining filters must
        # not remove the match even if they match nothing. The problem here
        # was that b__name generates a LOUTER JOIN, then b__c__name generates
        # a join to c, which the ORM tried to promote but failed, as that
        # join isn't nullable.
q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo")
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count("INNER JOIN"), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
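        # The underlying rule: b__name__isnull=True must use LEFT OUTER JOIN
        # because an INNER JOIN would drop exactly the rows whose b is NULL,
        # i.e. the ones the filter is looking for. b__name__isnull=False can
        # only match rows where the join succeeds, so INNER JOIN is both
        # correct and cheaper; ~~Q() normalizes back to the positive form.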
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note="n", misc="m")
e = ExtraInfo.objects.create(info="info", note=n)
a = Author.objects.create(name="Author1", num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name="Foo", creator=a)
r2 = Report.objects.create(name="Bar")
Report.objects.create(name="Bar", creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count(" JOIN "), 2)
self.assertSequenceEqual(qs.order_by("name"), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
i3 = Identifier.objects.create(name="i3")
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(
Identifier.objects.filter(program=None, channel=None), [i3]
)
self.assertSequenceEqual(
Identifier.objects.exclude(program=None, channel=None).order_by("name"),
[i1, i2],
)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
# Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
# join promotion.
qs1_doubleneg = Identifier.objects.exclude(
~Q(program__id=p1.id, channel__id=c1.id)
).order_by("pk")
qs1_filter = Identifier.objects.filter(
program__id=p1.id, channel__id=c1.id
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Test OR + doubleneg. The expected result is that channel is LOUTER
# joined, program INNER joined
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by("pk")
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed
        # down to the lowest level of the boolean tree, and another query
        # where this isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by("pk")
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN"))
self.assertEqual(0, str(qs1.query).count("INNER JOIN"))
self.assertEqual(
str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN")
)
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn("INNER JOIN", str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name="foo").select_related("tag")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name="foo").filter(member__name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name="r4").filter(report__name="r1")
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable
        joins is updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
springfield_elementary = School.objects.create()
        hogwarts = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogwarts)
Classroom.objects.create(school=hogward, name="Potion")
Classroom.objects.create(school=springfield_elementary, name="Main")
qs = Student.objects.filter(
~(
Q(school__classroom__name="Main")
& Q(school__classroom__has_blackboard=None)
)
)
self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1="f1", f2="f2")
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(" JOIN "), 0)
qs = qs.select_related("a", "b")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 0)
self.assertEqual(str(qs.query).count(" LEFT OUTER JOIN "), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # Now we have two different joins in an ORed condition; these must
        # be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN even
        # inside the ORed case: if the join to a returns nothing, the ANDed
        # filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f2="foo")).filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
        # Demotion is needed for the "a" join: it is marked as an outer join
        # by the filter above (even if it is trimmed away).
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = qs.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") | (Q(b__f1="foo") & Q(a__f1="bar")))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(
(Q(a__f1="foo") | Q(b__f1="foo")) & (Q(a__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
qs = BaseA.objects.filter(
Q(a__f1="foo") | Q(a__f1="bar") & (Q(b__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F("b__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(
Q(a__f1=F("b__f1")) | Q(a__f2=F("b__f2")) | Q(c__f1="foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
i_extra = Identifier.objects.create(name="extra")
i_program = Identifier.objects.create(name="program")
program = Program.objects.create(identifier=i_program)
i_channel = Identifier.objects.create(name="channel")
channel = Channel.objects.create(identifier=i_channel)
channel.programs.add(program)
        # channel contains 'program', so all Identifiers except that one
        # should be returned
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=channel).order_by("name"),
[i_channel, i_extra],
)
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=None).order_by("name"),
[i_program],
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text="pg3")
pg2 = Page.objects.create(text="pg2")
pg1 = Page.objects.create(text="pg1")
pa1 = Paragraph.objects.create(text="pa1")
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text="pa2")
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text="pa3")
ch1 = Chapter.objects.create(title="ch1", paragraph=pa1)
ch2 = Chapter.objects.create(title="ch2", paragraph=pa2)
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3)
b1 = Book.objects.create(title="b1", chapter=ch1)
b2 = Book.objects.create(title="b2", chapter=ch2)
b3 = Book.objects.create(title="b3", chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text="pg1")
self.assertNotIn("IS NOT NULL", str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data="foo")
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data="bar", parent=my1)
parents = MyObject.objects.filter(parent=F("id"))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F("id"))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
        Combining QuerySets with | works when one side involves an exclude()
        subquery.
"""
t = Tag.objects.create(name="foo")
a1 = Annotation.objects.create(tag=t, name="a1")
a2 = Annotation.objects.create(tag=t, name="a2")
a3 = Annotation.objects.create(tag=t, name="a3")
n = Note.objects.create(note="foo", misc="bar")
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name="")
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn("LEFT OUTER JOIN", str(qs.query))
else:
self.assertNotIn("LEFT OUTER JOIN", str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(
Order.objects.filter(items__in=OrderItem.objects.values_list("status")),
[o1],
)
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data="foo")
lfa2 = LeafA.objects.create(data="bar")
lfb1 = LeafB.objects.create(data="lfb1")
lfb2 = LeafB.objects.create(data="lfb2")
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
        # Test join trimming from ticket #18785.
qs = (
Item.objects.exclude(note__isnull=False)
.filter(name="something", creator__extra__isnull=True)
.order_by()
)
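        # After join trimming, only a single INNER JOIN should remain, and the
        # isnull lookups shouldn't force any OUTER JOINs.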
self.assertEqual(1, str(qs.query).count("INNER JOIN"))
self.assertEqual(0, str(qs.query).count("OUTER JOIN"))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
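        # The whole filter therefore reduces to Q(pk=p1.pk): an always-true
        # branch in an OR constrains nothing.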
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
cls.c = ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
# Passing an object of the class on which query is done.
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)
):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = [self.oa]
out_b = [self.ob, self.pob1]
out_c = [self.c]
# proxy model objects
self.assertSequenceEqual(
ObjectB.objects.filter(objecta=self.poa).order_by("name"), out_b
)
self.assertSequenceEqual(
ObjectA.objects.filter(objectb__in=self.pob).order_by("pk"), out_a * 2
)
# child objects
self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by("name"),
out_b,
)
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by(
"name"
),
out_b,
)
# parent objects
self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
ValueQuerySets are not checked for compatibility with the lookup field.
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name="ob")
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name="pob")
pob.num = pob.objecta.pk
pob.save()
self.assertSequenceEqual(
ObjectB.objects.filter(
objecta__in=ObjectB.objects.values_list("num")
).order_by("pk"),
[ob, pob],
)
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data="s1")
s2 = SharedConnection.objects.create(data="s2")
s3 = SharedConnection.objects.create(data="s3")
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3]
)
self.assertSequenceEqual(
SharedConnection.objects.order_by("-pointera__connection", "pk"),
expected_ordering,
)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name="jackstaff")
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name="jillstaff")
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
"creator__staffuser__staff", "owner__staffuser__staff"
)
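        # Each select_related() chain spans three relations, so the combined
        # query should contain exactly six joins with none duplicated.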
self.assertEqual(str(qs.query).count(" JOIN "), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(
task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff,
)
self.assertEqual(
task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff,
)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related("parent").defer("parent__created")
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(" INNER JOIN ", str(qs.query))
qs = qs.values("parent__parent__id")
self.assertIn(" INNER JOIN ", str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values("parent__parent__id")
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values("objecta__name")
self.assertIn(" INNER JOIN ", str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1")
sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2")
sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3")
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(
SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"),
[sc2, sc3],
)
self.assertSequenceEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]
)
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id="cpt1", tag="cpt1")
cp1 = CustomPk.objects.create(name="cp1", extra="extra")
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F("school")))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1
)
complex_q = Q(
pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000
/ F("ticket23605b__modelc_fk__field_c0")
)
&
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True)
& ~Q(
ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
                            # The same filters as the commented ones above,
                            # but double-negated (once by the outer ~Q(),
                            # once by these parentheses), so again a1
                            # matches and a2 does not.
Q(field_b1=True)
& Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
)
)
).filter(ticket23605b__field_b1=True)
)
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
    def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
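        # Q() is a no-op in the OR, leaving the always-false Q(pk__in=()) to
        # collapse the query to an empty result set (#24279).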
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "Field 'id' expected a number but got 'abc'."
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag="abc")
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, "abc"])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(
Individual.objects.filter(
Q(alive=False), Q(related_individual__isnull=True)
),
[i4],
)
self.assertSequenceEqual(
Individual.objects.exclude(
Q(alive=False), Q(related_individual__isnull=True)
).order_by("pk"),
[i1, i2, i3],
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_23622(self):
"""
        __pk__in and __in should behave the same for related fields when
        using a DISTINCT ON subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = Q(
ticket23605b__pk__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
qy = Q(
ticket23605b__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)),
set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)),
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
"""
Various complex queries that have been problematic in the past.
"""
import datetime
from django.db import models
from django.db.models.functions import Now
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
"self",
models.SET_NULL,
blank=True,
null=True,
related_name="children",
)
category = models.ForeignKey(
NamedCategory, models.SET_NULL, null=True, default=None
)
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=25)
tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True)
negate = models.BooleanField(default=True)
class Meta:
ordering = ["note"]
def __str__(self):
return self.note
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
class DateTimePK(models.Model):
date = models.DateTimeField(primary_key=True, default=datetime.datetime.now)
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE, null=True)
value = models.IntegerField(null=True)
date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True)
filterable = models.BooleanField(default=True)
class Meta:
ordering = ["info"]
def __str__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author, models.CASCADE)
note = models.ForeignKey(Note, models.CASCADE)
class Meta:
ordering = ["-note", "name"]
def __str__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, models.SET_NULL, to_field="num", null=True)
def __str__(self):
return self.name
class ReportComment(models.Model):
report = models.ForeignKey(Report, models.CASCADE)
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ("author__extra__note", "author__name", "rank")
def __str__(self):
return "%d: %s" % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item, models.CASCADE)
class Meta:
ordering = ["item"]
def __str__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
other_num = models.IntegerField(null=True)
another_num = models.IntegerField(null=True)
def __str__(self):
return str(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField("self")
class Meta:
ordering = ["valid"]
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey("Y", models.CASCADE)
class Y(models.Model):
x1 = models.ForeignKey(X, models.CASCADE, related_name="y1")
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey("LoopY", models.CASCADE)
class Meta:
ordering = ["y"]
class LoopY(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ["x"]
class LoopZ(models.Model):
z = models.ForeignKey("self", models.CASCADE)
class Meta:
ordering = ["z"]
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(public=True, tag__name="t1")
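# With CustomManager as the default manager below, ManagedModel.objects only
# returns public rows whose tag is named "t1"; normal_manager bypasses that
# default filtering.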
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related("details")
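# Because MemberManager always applies select_related("details"), accessing
# member.details on objects fetched through the default manager never
# triggers an extra query.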
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ["name", "extra"]
class Related(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
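    # A ForeignKey with unique=True behaves like a OneToOneField (Django's
    # fields.W342 check normally suggests the latter); it's presumably kept
    # as a nullable unique FK here to exercise that path.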
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity, models.CASCADE)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA, models.CASCADE)
b = models.ForeignKey(LeafB, models.CASCADE)
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ["single"]
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
class Meta:
ordering = ["others"]
def __str__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class Eaten(models.Model):
food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return str(self.num)
# Bug #12252
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, "type checking should happen without calling model __iter__"
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
childobjecta = models.ForeignKey(
ChildObjectA, models.SET_NULL, null=True, related_name="ca_pk"
)
def __str__(self):
return self.name
class SimpleCategory(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=35)
def __str__(self):
return self.name + " " + self.special_name
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory, models.CASCADE)
def __str__(self):
return "category item: " + str(self.category)
class MixedCaseFieldCategoryItem(models.Model):
CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE)
class MixedCaseDbColumnCategoryItem(models.Model):
category = models.ForeignKey(
SimpleCategory, models.CASCADE, db_column="CaTeGoRy_Id"
)
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name="first_rel")
second = models.ForeignKey(
SimpleCategory, models.CASCADE, related_name="second_rel"
)
class CommonMixedCaseForeignKeys(models.Model):
category = models.ForeignKey(CategoryItem, models.CASCADE)
mixed_case_field_category = models.ForeignKey(
MixedCaseFieldCategoryItem, models.CASCADE
)
mixed_case_db_column_category = models.ForeignKey(
MixedCaseDbColumnCategoryItem, models.CASCADE
)
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ["id"]
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC, models.CASCADE)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
d = models.ForeignKey(ModelD, models.CASCADE)
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, models.CASCADE, to_field="name")
responsibility = models.ForeignKey(
"Responsibility", models.CASCADE, to_field="description"
)
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(
Job, through=JobResponsibilities, related_name="responsibilities"
)
def __str__(self):
return self.description
# Models for low-level testing of disjunction join promotion.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey("Chapter", models.CASCADE)
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey("Paragraph", models.CASCADE)
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField("Page")
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey(
"self", models.SET_NULL, null=True, blank=True, related_name="children"
)
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
class Order(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=12, null=True, default="")
class Meta:
ordering = ("pk",)
def __str__(self):
return str(self.pk)
class OrderItem(models.Model):
order = models.ForeignKey(Order, models.CASCADE, related_name="items")
status = models.IntegerField()
class Meta:
ordering = ("pk",)
def __str__(self):
return str(self.pk)
class BaseUser(models.Model):
annotation = models.ForeignKey(Annotation, models.CASCADE, null=True, blank=True)
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.CASCADE, related_name="owner")
creator = models.ForeignKey(BaseUser, models.CASCADE, related_name="creator")
note = models.ForeignKey(Note, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.title
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, models.CASCADE, related_name="user")
def __str__(self):
return str(self.staff)
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=128)
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(
Person, related_name="employers", through="Employment"
)
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School, models.CASCADE)
class Classroom(models.Model):
name = models.CharField(max_length=20)
has_blackboard = models.BooleanField(null=True)
school = models.ForeignKey(School, models.CASCADE)
students = models.ManyToManyField(Student, related_name="classroom")
class Teacher(models.Model):
schools = models.ManyToManyField(School)
friends = models.ManyToManyField("self")
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = "Individual"
class RelatedIndividual(models.Model):
related = models.ForeignKey(
Individual, models.CASCADE, related_name="related_individual"
)
class Meta:
db_table = "RelatedIndividual"
class CustomDbColumn(models.Model):
custom_column = models.IntegerField(db_column="custom_name", null=True)
ip_address = models.GenericIPAddressField(null=True)
class CreatedField(models.DateTimeField):
db_returning = True
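    # db_returning=True makes inserts read this column back from the database
    # (e.g. via RETURNING on backends that support it), so the Now() default
    # computed by the database is populated on the instance.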
def __init__(self, *args, **kwargs):
kwargs.setdefault("default", Now)
super().__init__(*args, **kwargs)
class ReturningModel(models.Model):
created = CreatedField(editable=False)
class NonIntegerPKReturningModel(models.Model):
created = CreatedField(editable=False, primary_key=True)
class JSONFieldNullable(models.Model):
json_field = models.JSONField(blank=True, null=True)
class Meta:
required_db_features = {"supports_json_field"}
import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.core.exceptions import FieldError
from django.core.management.color import no_style
from django.db import (
DatabaseError,
DataError,
IntegrityError,
OperationalError,
connection,
)
from django.db.models import (
CASCADE,
PROTECT,
AutoField,
BigAutoField,
BigIntegerField,
BinaryField,
BooleanField,
CharField,
CheckConstraint,
DateField,
DateTimeField,
DecimalField,
DurationField,
F,
FloatField,
ForeignKey,
ForeignObject,
Index,
IntegerField,
JSONField,
ManyToManyField,
Model,
OneToOneField,
OrderBy,
PositiveIntegerField,
Q,
SlugField,
SmallAutoField,
SmallIntegerField,
TextField,
TimeField,
UniqueConstraint,
UUIDField,
Value,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper
from django.db.models.indexes import IndexExpression
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase,
ignore_warnings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.utils.deprecation import RemovedInDjango51Warning
from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField
from .models import (
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
AuthorWithIndexedName,
AuthorWithUniqueName,
AuthorWithUniqueNameAndBirthday,
Book,
BookForeignObj,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithoutAuthor,
BookWithSlug,
IntegerPK,
Node,
Note,
NoteRename,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
        # local_models should contain test-dependent model classes that will
        # be automatically removed from the app cache on test teardown.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
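    # Note: column_classes() maps each column name to (field_type,
    # description), where description follows the DB-API cursor layout;
    # index 6 is the null_ok flag these tests inspect.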
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
if cast_function and type(database_default) != type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
        number of foreign keys, unique constraints, and indexes on
        `table`.`column`. The `fk_to` argument is a 2-tuple specifying the
        expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
def get_column_collation(self, table, column):
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key, update data, and an index in the same
# transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
@isolate_apps("schema")
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = "schema"
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name("id")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(
new_field.remote_field.through, "tag_id", "schema_tag"
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(
connection
) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
"column": editor.quote_name(new_field.name),
}
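        # The new column is nullable, so no temporary default should have been
        # added and then dropped while adding it.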
self.assertFalse(
any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
)
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
)
columns = self.column_classes(Author)
self.assertEqual(
columns["age"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
        Adding a field and then removing it removes all deferred SQL referring
        to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["surname"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
columns["surname"][1][6],
connection.features.interprets_empty_strings_as_nulls,
)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns["awesome"][0]
self.assertEqual(
field_type, connection.features.introspected_field_types["BooleanField"]
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default that must be transformed before saving
        # (here, the dict default is stored as its item count).
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns["thing"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b"123")
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns["bits"][0], "TextField")
@isolate_apps("schema")
def test_add_auto_field(self):
class AddAutoFieldModel(Model):
name = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(AddAutoFieldModel)
self.isolated_local_models = [AddAutoFieldModel]
old_field = AddAutoFieldModel._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
new_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.alter_field(AddAutoFieldModel, old_field, new_field)
new_auto_field = AutoField(primary_key=True)
new_auto_field.set_attributes_from_name("id")
        new_auto_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.add_field(AddAutoFieldModel, new_auto_field)
# Crashes on PostgreSQL when the GENERATED BY suffix is missing.
AddAutoFieldModel.objects.create(name="test")
def test_remove_field(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with CaptureQueriesContext(connection) as ctx:
editor.remove_field(Author, Author._meta.get_field("name"))
columns = self.column_classes(Author)
self.assertNotIn("name", columns)
if getattr(connection.features, "can_alter_table_drop_column", True):
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertTrue(columns["name"][1][6])
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Now that ID is an IntegerField, the database raises an error if it
# isn't provided.
if not connection.features.supports_unspecified_pk:
with self.assertRaises(DatabaseError):
Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_auto_field_quoted_db_column(self):
class Foo(Model):
id = AutoField(primary_key=True, db_column='"quoted_id"')
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.db_column = '"quoted_id"'
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field("uuid")
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name("uuid")
new_field.model = Author
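        # A table allows only one primary key, so drop the old "id" column
        # before promoting "uuid".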
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field("id"))
editor.alter_field(Author, old_field, new_field, strict=True)
# Redundant unique constraint is not added.
count = self.get_constraints_count(
Author._meta.db_table,
Author._meta.get_field("uuid").column,
None,
)
self.assertLessEqual(count["uniques"], 1)
@isolate_apps("schema")
def test_alter_primary_key_quoted_db_table(self):
class Foo(Model):
class Meta:
app_label = "schema"
db_table = '"foo"'
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_not_null_with_default_value(self):
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("address")
new_field = TextField(blank=True, default="", null=False)
new_field.set_attributes_from_name("address")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("parent")
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name("parent")
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_null_with_default_value_deferred_constraints(self):
class Publisher(Model):
class Meta:
app_label = "schema"
class Article(Model):
publisher = ForeignKey(Publisher, CASCADE)
title = CharField(max_length=50, null=True)
description = CharField(max_length=100, null=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Publisher)
editor.create_model(Article)
self.isolated_local_models = [Article, Publisher]
publisher = Publisher.objects.create()
Article.objects.create(publisher=publisher)
old_title = Article._meta.get_field("title")
new_title = CharField(max_length=50, null=False, default="")
new_title.set_attributes_from_name("title")
old_description = Article._meta.get_field("description")
new_description = CharField(max_length=100, null=False, default="")
new_description.set_attributes_from_name("description")
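        # Altering both columns in a single schema editor exercises deferred
        # constraint handling while a row with an FK exists.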
with connection.schema_editor() as editor:
editor.alter_field(Article, old_title, new_title, strict=True)
editor.alter_field(Article, old_description, new_description, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05")
old_field = Note._meta.get_field("info")
new_field = DateField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05 3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = TimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_keep_null_status(self):
"""
        Changing a field type shouldn't affect the NOT NULL status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_not_null_to_null(self):
"""
Nullability for textual fields is preserved on databases that
interpret empty strings as NULLs.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
columns = self.column_classes(Author)
# Field is nullable.
self.assertTrue(columns["uuid"][1][6])
# Change to NOT NULL.
old_field = Author._meta.get_field("uuid")
new_field = SlugField(null=False, blank=True)
new_field.set_attributes_from_name("uuid")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
# Nullability is preserved.
self.assertTrue(columns["uuid"][1][6])
def test_alter_numeric_field_keep_null_status(self):
"""
        Changing a field type shouldn't affect the NOT NULL status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="aaa")
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="bbb")
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns["height"][1][6])
# Create some test data
Author.objects.create(name="Not null author", height=12)
Author.objects.create(name="Null author")
# Verify null value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertIsNone(Author.objects.get(name="Null author").height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertFalse(columns["height"][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertEqual(Author.objects.get(name="Null author").height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field("name")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_char_field_decrease_length(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
Author.objects.create(name="x" * 255)
# Change max_length of CharField.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
msg = "value too long for type character varying(254)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_with_custom_db_type(self):
from django.contrib.postgres.fields import ArrayField
class Foo(Model):
field = ArrayField(CharField(max_length=255))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("field")
new_field = ArrayField(CharField(max_length=16))
new_field.set_attributes_from_name("field")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=["x" * 16])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(CharField(max_length=15))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_nested_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(ArrayField(CharField(max_length=16)))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=[["x" * 16]])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(ArrayField(CharField(max_length=15)))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
from django.contrib.postgres.fields import ArrayField
ci_collation = "case_insensitive"
cs_collation = "en-x-icu"
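        # "en-x-icu" is a deterministic ICU collation that PostgreSQL
        # typically provides out of the box; "case_insensitive" is created
        # below.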
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16, db_collation=ci_collation))
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
ci_collation,
)
# Alter collation.
old_field = ArrayModel._meta.get_field("field")
new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
new_field_cs.set_attributes_from_name("field")
        new_field_cs.model = ArrayModel
with connection.schema_editor() as editor:
editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
cs_collation,
)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_unique_with_collation_charfield(self):
ci_collation = "case_insensitive"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class CiCharModel(Model):
field = CharField(max_length=16, db_collation=ci_collation, unique=True)
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(CiCharModel)
self.isolated_local_models = [CiCharModel]
self.assertEqual(
self.get_column_collation(CiCharModel._meta.db_table, "field"),
ci_collation,
)
self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field("info")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns["height"][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBook]
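        # Tables for models listed in local_models are dropped by the suite's
        # tearDown.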
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details["foreign_key"]:
self.fail(
"Found an unexpected FK constraint to %s" % details["columns"]
)
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique anymore
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
Book.objects.all().delete()
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique now
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for
# OneToOneField.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_autofield_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
# Rename the field.
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("note_ptr")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Alter AutoField to OneToOneField.
new_field_o2o = OneToOneField(Note, CASCADE)
new_field_o2o.set_attributes_from_name("note_ptr")
new_field_o2o.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field_o2o, strict=True)
columns = self.column_classes(Author)
field_type, _ = columns["note_ptr_id"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for
# ForeignKey.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name="Foo")
Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
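        # An insert with an explicit pk doesn't advance the backend's pk
        # sequence (e.g. on PostgreSQL), so reset it before the implicit-pk
        # insert below.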
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_autofield_pk_to_smallautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = SmallAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
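        # As above, reset the sequence after the explicit-pk insert.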
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToAutoField(Model):
i = AutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToBigAutoField(Model):
i = BigAutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToBigAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
"""
        Should be able to alter a SmallIntegerField(primary_key=True) to
        SmallAutoField(primary_key=True).
"""
class SmallIntegerPK(Model):
i = SmallIntegerField(primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SmallIntegerPK)
self.isolated_local_models = [SmallIntegerPK]
old_field = SmallIntegerPK._meta.get_field("i")
new_field = SmallAutoField(primary_key=True)
new_field.model = SmallIntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
        IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field("i")
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# model requires a new PK
old_field = Transitional._meta.get_field("j")
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name("j")
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(
columns["display_name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field="name")
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
"""
        Renaming a field shouldn't affect the NOT NULL status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["detail_info"][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass(
"TagM2MTest", related_name="books", through=LocalTagThrough
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(
columns["book_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertEqual(
columns["tag_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
class LocalAuthorNoteThrough(Model):
book = ForeignKey("schema.Author", CASCADE)
tag = ForeignKey("self", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalNoteWithM2MThrough(Model):
authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalAuthorNoteThrough)
editor.create_model(LocalNoteWithM2MThrough)
# Remove the through parameter.
old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
new_field = ManyToManyField("Author")
new_field.set_attributes_from_name("authors")
msg = (
f"Cannot alter field {old_field} into {new_field} - they are not "
f"compatible types (you cannot alter to or from M2M fields, or add or "
f"remove through= on M2M fields)"
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
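        # contribute_to_class() registers the field and creates the
        # auto-generated through model in memory; no DDL runs yet.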
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models["schema"][
new_field.remote_field.through._meta.model_name
]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(
LocalAuthorWithM2MThrough, old_field, new_field, strict=True
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
"tagm2mtest_id",
"schema_tagm2mtest",
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
)
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = "schema"
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = "schema"
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field("title")
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, "title1")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details["columns"] == ["height"] and details["check"]:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
class DurationModel(Model):
duration = DurationField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(DurationModel)
self.isolated_local_models = [DurationModel]
constraint_name = "duration_gte_5_minutes"
constraint = CheckConstraint(
check=Q(duration__gt=datetime.timedelta(minutes=5)),
name=constraint_name,
)
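        # The timedelta parameter must be compiled into a backend-appropriate
        # duration literal in the CHECK clause.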
DurationModel._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(DurationModel, constraint)
constraints = self.get_constraints(DurationModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with self.assertRaises(IntegrityError), atomic():
DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the custom check constraint
constraint = CheckConstraint(
check=Q(height__gte=0), name="author_height_gte_0_check"
)
custom_constraint_name = constraint.name
Author._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field check
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field check
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the check constraint
with connection.schema_editor() as editor:
Author._meta.constraints = []
editor.remove_constraint(Author, constraint)
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
TagUniqueRename._meta.db_table = "unique-table"
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
finally:
with connection.schema_editor() as editor:
editor.delete_model(TagUniqueRename)
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
"""
If AlterField isn't selective about dropping foreign key constraints
when modifying a field with a unique constraint, the AlterField
incorrectly drops and recreates the Book.author foreign key even though
it doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name("name")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
class Book(Model):
tags = ManyToManyField(Tag, related_name="books")
class Meta:
app_label = "schema"
        self.isolated_local_models = [
            Book._meta.get_field("tags").remote_field.through
        ]
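        # Registering the auto-created through model ensures its table is
        # dropped during cleanup.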
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name("slug")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueName)
self.local_models = [AuthorWithUniqueName]
# Add the custom unique constraint
constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
custom_constraint_name = constraint.name
AuthorWithUniqueName._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueName, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field uniqueness
old_field = AuthorWithUniqueName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field uniqueness
new_field2 = AuthorWithUniqueName._meta.get_field("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueName._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueName, constraint)
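# Hedged helper sketch (editor addition, not in the original suite): the
# constraint-filtering comprehension repeated throughout these tests,
# factored out for reference. Returns the names of unique constraints
# covering exactly `columns`, excluding any names listed in `exclude`.
def _unique_constraint_names(self, table, columns, exclude=()):
    return [
        name
        for name, details in self.get_constraints(table).items()
        if details["columns"] == list(columns)
        and details["unique"]
        and name not in exclude
    ]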
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, UniqueTest._meta.unique_together, []
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, [], UniqueTest._meta.unique_together
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
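# Hedged sketch (editor addition): alter_unique_together() takes the old
# and new unique_together values and diffs them, so adding and removing a
# constraint are the same call with the arguments swapped. Assumes
# UniqueTest's table already exists.
def _toggle_unique_together_sketch(self):
    with connection.schema_editor() as editor:
        editor.alter_unique_together(UniqueTest, [], [("year", "slug")])
        editor.alter_unique_together(UniqueTest, [("year", "slug")], [])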
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
editor.add_field(BookWithoutAuthor, new_field)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def _test_composed_index_with_fk(self, index):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
table = Book._meta.db_table
self.assertEqual(Book._meta.indexes, [])
Book._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(Book, index)
self.assertIn(index.name, self.get_constraints(table))
Book._meta.indexes = []
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
def test_composed_index_with_fk(self):
index = Index(fields=["author", "title"], name="book_author_title_idx")
self._test_composed_index_with_fk(index)
def test_composed_desc_index_with_fk(self):
index = Index(fields=["-author", "title"], name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_index_with_fk(self):
index = Index(F("author"), F("title"), name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_desc_func_index_with_fk(self):
index = Index(F("author").desc(), F("title"), name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_transform_index_with_fk(self):
index = Index(F("title__lower"), name="book_title_lower_idx")
with register_lookup(CharField, Lower):
self._test_composed_index_with_fk(index)
def _test_composed_constraint_with_fk(self, constraint):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
table = Book._meta.db_table
self.assertEqual(Book._meta.constraints, [])
Book._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Book, constraint)
self.assertIn(constraint.name, self.get_constraints(table))
Book._meta.constraints = []
with connection.schema_editor() as editor:
editor.remove_constraint(Book, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
def test_composed_constraint_with_fk(self):
constraint = UniqueConstraint(
fields=["author", "title"],
name="book_author_title_uniq",
)
self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_composed_check_constraint_with_fk(self):
constraint = CheckConstraint(check=Q(author__gt=0), name="book_author_check")
self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueNameAndBirthday)
self.local_models = [AuthorWithUniqueNameAndBirthday]
# Add the custom unique constraint
constraint = UniqueConstraint(
fields=["name", "birthday"], name="author_name_birthday_uniq"
)
custom_constraint_name = constraint.name
AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Remove unique together
unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, unique_together, []
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add unique together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, [], unique_together
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueNameAndBirthday._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(fields=["name"], name="name_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIs(sql.references_table(table), True)
self.assertIs(sql.references_column(table, "name"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC"])
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
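# Editor's note (hedged): on PostgreSQL the constraint above is implemented
# as a unique index, roughly
#   CREATE UNIQUE INDEX "func_upper_uq" ON "schema_author" ((UPPER("name")) DESC)
# but quoting and parenthesization are backend-dependent, which is why the
# assertions inspect the Statement object instead of comparing full SQL.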
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Upper("title"),
Lower("slug"),
name="func_upper_lower_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains database functions.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
sql = str(sql)
self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_unique_constraint_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("height").desc(),
"uuid",
Lower("name").asc(),
name="func_f_lower_field_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
constraints = self.get_constraints(table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "uuid"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
def test_func_unique_constraint_partial(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_cond_weight_uq",
condition=Q(weight__isnull=False),
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("weight"),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
def test_func_unique_constraint_covering(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_covering_uq",
include=["weight", "height"],
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(
constraints[constraint.name]["columns"],
[None, "weight", "height"],
)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
self.assertIs(sql.references_column(table, "height"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"INCLUDE (%s, %s)"
% (
editor.quote_name("weight"),
editor.quote_name("height"),
),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
constraint = UniqueConstraint(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the slug/title columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields aren't indexed to begin with
self.assertEqual(Book._meta.index_together, ())
# Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [["author", "title"]], [])
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
class TagIndexed(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
index_together = [["slug", "title"]]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
self.isolated_local_models = [TagIndexed]
# Ensure there is an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c["columns"] == ["slug", "title"]
),
True,
)
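# Hedged sketch (editor addition): index_together is deprecated (hence the
# RemovedInDjango51Warning filters in these tests); the non-deprecated
# equivalent declares the same composite index via Meta.indexes, roughly:
#
#     class TagIndexed(Model):
#         class Meta:
#             app_label = "schema"
#             indexes = [Index(fields=["slug", "title"], name="tag_idx")]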
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_remove_index_together_does_not_remove_meta_indexes(self):
class AuthorWithIndexedNameAndBirthday(Model):
name = CharField(max_length=255)
birthday = DateField()
class Meta:
app_label = "schema"
index_together = [["name", "birthday"]]
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedNameAndBirthday)
self.isolated_local_models = [AuthorWithIndexedNameAndBirthday]
# Add the custom index
index = Index(fields=["name", "birthday"], name="author_name_birthday_idx")
custom_index_name = index.name
AuthorWithIndexedNameAndBirthday._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedNameAndBirthday, index)
# Ensure the indexes exist
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Remove index together
index_together = AuthorWithIndexedNameAndBirthday._meta.index_together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, index_together, []
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add index together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, [], index_together
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the index
with connection.schema_editor() as editor:
AuthorWithIndexedNameAndBirthday._meta.indexes = []
editor.remove_index(AuthorWithIndexedNameAndBirthday, index)
@isolate_apps("schema")
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Alter the table
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=["name"], name="author_title_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
self.local_models = [AuthorWithIndexedName]
# Ensure the table has its index
self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=["-name"], name="author_name_idx")
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=("name",),
)
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithIndexedName, old_field, new_field, strict=True
)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
Indexes can be defined with column ordering (ASC/DESC).
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column and verify that it creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
# Remove the unique and check that the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
def _index_expressions_wrappers(self):
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
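# Editor's note (hedged): the wrapper classes are typically OrderBy and
# Collate; set_wrapper_classes() drops Collate on backends that treat
# COLLATE as part of the column expression, so the error messages built
# below from _index_expressions_wrappers() are backend-dependent.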
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name").desc(), name="func_lower_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_f(self):
with connection.schema_editor() as editor:
editor.create_model(Tag)
index = Index("slug", F("title").desc(), name="func_f_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Tag, index)
sql = index.create_sql(Tag, editor)
table = Tag._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
# SQL contains columns.
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIs(sql.references_column(table, "title"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Tag, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
index = Index(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains database functions.
self.assertIs(sql.references_column(table, "name"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
index = Index(
F("author").desc(),
Lower("title").asc(),
"pub_date",
name="func_f_lower_field_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Book, index)
sql = index.create_sql(Book, editor)
table = Book._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
self.assertEqual(len(constraints[index.name]["columns"]), 3)
self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "author_id"), True)
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "pub_date"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@isolate_apps("schema")
def test_func_index_f_decimalfield(self):
class Node(Model):
value = DecimalField(max_digits=5, decimal_places=2)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
index = Index(F("value"), name="func_f_decimalfield_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Node, index)
sql = index.create_sql(Node, editor)
table = Node._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "value"), True)
# SQL doesn't contain casting.
self.assertNotIn("CAST", str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Node, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_cast(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Cast("weight", FloatField()), name="func_cast_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(BookWithSlug, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@skipIfDBFeature("collate_as_index_expression")
def test_func_index_collate_f_ordered(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(
Collate(F("name").desc(), collation=collation),
name="func_collate_f_desc_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_calc(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns and expressions.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "weight"), True)
sql = str(sql)
self.assertIs(
sql.index(editor.quote_name("height"))
< sql.index("/")
< sql.index(editor.quote_name("weight"))
< sql.index("+")
< sql.index("5"),
True,
)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index("field__some_key", name="func_json_key_idx")
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform_cast(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index(
Cast(KeyTextTransform("some_key", "field"), IntegerField()),
name="func_json_key_cast_idx",
)
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_index_unsupported(self):
# Index is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("name"), name="random_idx")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_index(Author, index))
self.assertIsNone(editor.remove_index(Author, index))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nonexistent_field(self):
index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Random(), name="func_random_idx")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_index(Author, index)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
"id",
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
def test_alter_primary_key_the_same_name(self):
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=2, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
Ensures the transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
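# Hedged sketch (editor addition): on backends that can roll back DDL, the
# context manager also rolls the failed statements back, so an aborted
# block leaves no table behind. Guard on the feature flag before relying
# on this:
def _rollback_ddl_sketch(self):
    if not connection.features.can_rollback_ddl:
        self.skipTest("Backend cannot roll back DDL.")
    try:
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            raise RuntimeError("abort; CREATE TABLE should be rolled back")
    except RuntimeError:
        pass
    with self.assertRaises(DatabaseError):
        list(Tag.objects.all())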
@skipIfDBFeature("can_rollback_ddl")
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(
editor.sql_create_table % {"table": "foo", "definition": ""}
)
@skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name(
"author_foreign_key_with_really_long_field_name_id"
)
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK; before the fix this would fail due to the long reference name
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
self.isolated_local_models = [Author]
if connection.vendor == "mysql":
self.assertForeignKeyExists(
Book, "author_id", '"table_author_double_quoted"'
)
else:
self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it, when the table
has an SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail(
"Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e
)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs["db_column"] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_index
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
"include": "",
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
constraint_name = "CamelCaseUniqConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(editor._create_unique_sql(model, [field], constraint_name))
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(
model,
get_field(Author, CASCADE, field_class=ForeignKey),
field,
strict=True,
)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add a new CharField to ensure the default from effective_default() is used
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(
item[0],
None if connection.features.interprets_empty_strings_as_nulls else "",
)
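# Hedged sketch (editor addition): effective_default() computes the value
# used above to backfill pre-existing rows; for a blank, non-null
# CharField that value is the empty string (backends that interpret empty
# strings as NULL may prep it differently).
def _effective_default_sketch(self):
    field = CharField(max_length=15, blank=True)
    field.set_attributes_from_name("surname")
    with connection.schema_editor() as editor:
        self.assertEqual(editor.effective_default(field), "")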
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default="surname default")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], "surname default")
# And that the default is no longer set in the database.
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_add_field_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable CharField with a default.
new_field = CharField(max_length=15, blank=True, null=True, default="surname")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "surname"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_add_textfield_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable TextField with a default.
new_field = TextField(blank=True, null=True, default="text")
new_field.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT description FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "description"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field("height")
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_doesnt_perform_queries(self):
"""
No queries are performed if a field default changes and the field isn't
changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_fk_attributes_noop(self):
"""
No queries are performed when changing field attributes that don't
affect the schema.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_field = Book._meta.get_field("author")
new_field = ForeignKey(
Author,
blank=True,
editable=False,
error_messages={"invalid": "error message"},
help_text="help text",
limit_choices_to={"limit": "choice"},
on_delete=PROTECT,
related_name="related_name",
related_query_name="related_query_name",
validators=[lambda x: x],
verbose_name="verbose name",
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, new_field, old_field, strict=True)
def test_alter_field_choices_noop(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(
choices=(("Jane", "Jane"), ("Joe", "Joe")),
max_length=255,
)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes; one for the LIKE operator.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851",
"schema_author_nom_de_plume_7570a851_like",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes; one for the LIKE operator.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851_like",
"schema_author_nom_de_plume_key",
],
)
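    # Why two entries appear in these PostgreSQL tests: Django pairs every
    # index or unique constraint on a CharField/TextField with an extra
    # varchar_pattern_ops/text_pattern_ops index so LIKE-based lookups
    # (contains, startswith, ...) can use an index under non-C collations.
    # Roughly, with identifiers shortened (illustrative SQL only):
    #
    #     CREATE INDEX "..._like" ON "schema_author"
    #         ("nom_de_plume" varchar_pattern_ops);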
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field("info")
new_field = TextField(db_index=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, "info"),
["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"), []
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to add db_index=True
old_field = Tag._meta.get_field("slug")
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
        self, mocked_tz, mocked_datetime
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(
month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc
)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
        dtob_auto_now_add = DateTimeField(auto_now_add=True)
        dtob_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
        self.check_added_field_default(
            editor,
            Author,
            dtob_auto_now_add,
"dtob_auto_now_add",
now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
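    # Each check_added_field_default() call above boils down to the following
    # (a condensed sketch, not the helper's exact code):
    #
    #     with connection.schema_editor() as editor:
    #         editor.add_field(Author, dob_auto_now)     # backfills rows
    #     value = Author.objects.get().dob_auto_now      # == now.date()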
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
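    # The equality above holds because the index-name hash strips the
    # namespace first; conceptually (illustrative, not the private
    # implementation itself):
    #
    #     '"nnn"."ttt"'.split(".")[-1].strip('"')  # -> "ttt"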
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
|
c6e824e7e1f43ceb21a5c9215b007a3270da76699612e2c1e5b19b45a3b036a7 | import operator
import uuid
from unittest import mock
from django import forms
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import (
DataError,
IntegrityError,
NotSupportedError,
OperationalError,
connection,
models,
)
from django.db.models import (
Count,
ExpressionWrapper,
F,
IntegerField,
OuterRef,
Q,
Subquery,
Transform,
Value,
)
from django.db.models.expressions import RawSQL
from django.db.models.fields.json import (
KT,
KeyTextTransform,
KeyTransform,
KeyTransformFactory,
KeyTransformTextLookupMixin,
)
from django.db.models.functions import Cast
from django.test import SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel
@skipUnlessDBFeature("supports_json_field")
class JSONFieldTests(TestCase):
def test_invalid_value(self):
msg = "is not JSON serializable"
with self.assertRaisesMessage(TypeError, msg):
NullableJSONModel.objects.create(
value={
"uuid": uuid.UUID("d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475"),
}
)
def test_custom_encoder_decoder(self):
value = {"uuid": uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")}
obj = NullableJSONModel(value_custom=value)
obj.clean_fields()
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value_custom, value)
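    # A minimal sketch of the encoder/decoder hooks exercised above; the
    # model and field below are hypothetical:
    #
    #     class Payment(models.Model):
    #         meta = models.JSONField(
    #             encoder=DjangoJSONEncoder,  # serializes UUID/Decimal/datetime
    #             decoder=CustomJSONDecoder,  # restores them on load
    #         )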
def test_db_check_constraints(self):
value = "{@!invalid json value 123 $!@#"
with mock.patch.object(DjangoJSONEncoder, "encode", return_value=value):
with self.assertRaises((IntegrityError, DataError, OperationalError)):
NullableJSONModel.objects.create(value_custom=value)
class TestMethods(SimpleTestCase):
def test_deconstruct(self):
field = models.JSONField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.JSONField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_deconstruct_custom_encoder_decoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs["encoder"], DjangoJSONEncoder)
self.assertEqual(kwargs["decoder"], CustomJSONDecoder)
def test_get_transforms(self):
@models.JSONField.register_lookup
class MyTransform(Transform):
lookup_name = "my_transform"
field = models.JSONField()
transform = field.get_transform("my_transform")
self.assertIs(transform, MyTransform)
models.JSONField._unregister_lookup(MyTransform)
transform = field.get_transform("my_transform")
self.assertIsInstance(transform, KeyTransformFactory)
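    # While registered, the transform is reachable from query lookups; a
    # hedged sketch (usage is hypothetical):
    #
    #     NullableJSONModel.objects.filter(value__my_transform="x")
    #
    # After unregistering, the same name falls through to KeyTransformFactory,
    # i.e. it is treated as a JSON key named "my_transform".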
def test_key_transform_text_lookup_mixin_non_key_transform(self):
transform = Transform("test")
msg = (
"Transform should be an instance of KeyTransform in order to use "
"this lookup."
)
with self.assertRaisesMessage(TypeError, msg):
KeyTransformTextLookupMixin(transform)
class TestValidation(SimpleTestCase):
def test_invalid_encoder(self):
msg = "The encoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(encoder=DjangoJSONEncoder())
def test_invalid_decoder(self):
msg = "The decoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(decoder=CustomJSONDecoder())
def test_validation_error(self):
field = models.JSONField()
msg = "Value must be valid JSON."
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
with self.assertRaisesMessage(ValidationError, msg):
field.clean({"uuid": value}, None)
def test_custom_encoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder)
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
field.clean({"uuid": value}, None)
class TestFormField(SimpleTestCase):
def test_formfield(self):
model_field = models.JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_formfield_custom_encoder_decoder(self):
model_field = models.JSONField(
encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder
)
form_field = model_field.formfield()
self.assertIs(form_field.encoder, DjangoJSONEncoder)
self.assertIs(form_field.decoder, CustomJSONDecoder)
class TestSerialization(SimpleTestCase):
test_data = (
'[{"fields": {"value": %s}, "model": "model_fields.jsonmodel", "pk": null}]'
)
test_values = (
# (Python value, serialized value),
({"a": "b", "c": None}, '{"a": "b", "c": null}'),
("abc", '"abc"'),
('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
)
def test_dumping(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = JSONModel(value=value)
data = serializers.serialize("json", [instance])
self.assertJSONEqual(data, self.test_data % serialized)
def test_loading(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = list(
serializers.deserialize("json", self.test_data % serialized)
)[0].object
self.assertEqual(instance.value, value)
def test_xml_serialization(self):
test_xml_data = (
'<django-objects version="1.0">'
'<object model="model_fields.nullablejsonmodel">'
'<field name="value" type="JSONField">%s'
"</field></object></django-objects>"
)
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = NullableJSONModel(value=value)
data = serializers.serialize("xml", [instance], fields=["value"])
self.assertXMLEqual(data, test_xml_data % serialized)
new_instance = list(serializers.deserialize("xml", data))[0].object
self.assertEqual(new_instance.value, instance.value)
@skipUnlessDBFeature("supports_json_field")
class TestSaveLoad(TestCase):
def test_null(self):
obj = NullableJSONModel(value=None)
obj.save()
obj.refresh_from_db()
self.assertIsNone(obj.value)
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_json_null_different_from_sql_null(self):
json_null = NullableJSONModel.objects.create(value=Value("null"))
json_null.refresh_from_db()
sql_null = NullableJSONModel.objects.create(value=None)
sql_null.refresh_from_db()
# 'null' is not equal to NULL in the database.
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=Value("null")),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=None),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[sql_null],
)
# 'null' is equal to NULL in Python (None).
self.assertEqual(json_null.value, sql_null.value)
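    # Summary of the distinction exercised above (same model, a sketch):
    #
    #     NullableJSONModel.objects.create(value=None)           # SQL NULL
    #     NullableJSONModel.objects.create(value=Value("null"))  # JSON null
    #
    # Both load back as Python None; only lookups can tell them apart:
    # value=None matches the JSON null row, value__isnull=True the SQL NULL
    # row.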
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_primitives(self):
values = [
True,
1,
1.45,
"String",
"",
]
for value in values:
with self.subTest(value=value):
obj = JSONModel(value=value)
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_dict(self):
values = [
{},
{"name": "John", "age": 20, "height": 180.3},
{"a": True, "b": {"b1": False, "b2": None}},
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_list(self):
values = [
[],
["John", 20, 180.3],
[True, [False, None]],
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_realistic_object(self):
value = {
"name": "John",
"age": 20,
"pets": [
{"name": "Kit", "type": "cat", "age": 2},
{"name": "Max", "type": "dog", "age": 1},
],
"courses": [
["A1", "A2", "A3"],
["B1", "B2"],
["C1"],
],
}
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
@skipUnlessDBFeature("supports_json_field")
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.primitives = [True, False, "yes", 7, 9.6]
values = [
None,
[],
{},
{"a": "b", "c": 14},
{
"a": "b",
"c": 14,
"d": ["e", {"f": "g"}],
"h": True,
"i": False,
"j": None,
"k": {"l": "m"},
"n": [None, True, False],
"o": '"quoted"',
"p": 4.2,
"r": {"s": True, "t": False},
},
[1, [2]],
{"k": True, "l": False, "foo": "bax"},
{
"foo": "bar",
"baz": {"a": "b", "c": "d"},
"bar": ["foo", "bar"],
"bax": {"foo": "bar"},
},
]
cls.objs = [NullableJSONModel.objects.create(value=value) for value in values]
if connection.features.supports_primitives_in_json_field:
cls.objs.extend(
[
NullableJSONModel.objects.create(value=value)
for value in cls.primitives
]
)
cls.raw_sql = "%s::jsonb" if connection.vendor == "postgresql" else "%s"
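    # raw_sql types the bind parameter for the RawSQL() expressions below: on
    # PostgreSQL the placeholder must be cast to jsonb, e.g.
    # RawSQL("%s::jsonb", ['{"x": "bar"}']); other backends accept the plain
    # string.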
def test_exact(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={}),
[self.objs[2]],
)
def test_exact_complex(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={"a": "b", "c": 14}),
[self.objs[3]],
)
def test_icontains(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__icontains="BaX"),
self.objs[6:8],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[self.objs[0]],
)
def test_ordering_by_transform(self):
mariadb = connection.vendor == "mysql" and connection.mysql_is_mariadb
values = [
{"ord": 93, "name": "bar"},
{"ord": 22.1, "name": "foo"},
{"ord": -1, "name": "baz"},
{"ord": 21.931902, "name": "spam"},
{"ord": -100291029, "name": "eggs"},
]
for field_name in ["value", "value_custom"]:
with self.subTest(field=field_name):
objs = [
NullableJSONModel.objects.create(**{field_name: value})
for value in values
]
query = NullableJSONModel.objects.filter(
**{"%s__name__isnull" % field_name: False},
).order_by("%s__ord" % field_name)
expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
if mariadb or connection.vendor == "oracle":
# MariaDB and Oracle return JSON values as strings.
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
self.assertSequenceEqual(query, expected)
def test_ordering_grouping_by_key_transform(self):
base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
for qs in (
base_qs.order_by("value__d__0"),
base_qs.annotate(
key=KeyTransform("0", KeyTransform("d", "value"))
).order_by("key"),
):
self.assertSequenceEqual(qs, [self.objs[4]])
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
qs = NullableJSONModel.objects.filter(value__isnull=False)
self.assertQuerysetEqual(
qs.filter(value__isnull=False)
.annotate(key=KT("value__d__1__f"))
.values("key")
.annotate(count=Count("key"))
.order_by("count"),
[(none_val, 0), ("g", 1)],
operator.itemgetter("key", "count"),
)
def test_ordering_grouping_by_count(self):
qs = (
NullableJSONModel.objects.filter(
value__isnull=False,
)
.values("value__d__0")
.annotate(count=Count("value__d__0"))
.order_by("count")
)
self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter("count"))
def test_order_grouping_custom_decoder(self):
NullableJSONModel.objects.create(value_custom={"a": "b"})
qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
self.assertSequenceEqual(
qs.values(
"value_custom__a",
)
.annotate(
count=Count("id"),
)
.order_by("value_custom__a"),
[{"value_custom__a": "b", "count": 1}],
)
def test_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__foo=KeyTransform("x", expr)),
[self.objs[7]],
)
def test_nested_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
value__foo=KeyTransform("y", KeyTransform("x", expr))
),
[self.objs[7]],
)
def test_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("0", "key"),
expr=KeyTransform("0", Cast("key", models.JSONField())),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(value={"d": ["e", "e"]})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__0"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__1")),
[obj],
)
def test_nested_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
expr=KeyTransform(
"f", KeyTransform("1", Cast("key", models.JSONField()))
),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_nested_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(
value={"d": ["e", {"f": "g"}, {"f": "g"}]},
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__1__f"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__2__f")),
[obj],
)
def test_nested_key_transform_on_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
subquery_value=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
key=KeyTransform("d", "subquery_value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
)
.filter(chain="g"),
[self.objs[4]],
)
def test_key_text_transform_char_lookup(self):
qs = NullableJSONModel.objects.annotate(
char_value=KeyTextTransform("foo", "value"),
).filter(char_value__startswith="bar")
self.assertSequenceEqual(qs, [self.objs[7]])
qs = NullableJSONModel.objects.annotate(
char_value=KeyTextTransform(1, KeyTextTransform("bar", "value")),
).filter(char_value__startswith="bar")
self.assertSequenceEqual(qs, [self.objs[7]])
def test_expression_wrapper_key_transform(self):
self.assertCountEqual(
NullableJSONModel.objects.annotate(
expr=ExpressionWrapper(
KeyTransform("c", "value"),
output_field=IntegerField(),
),
).filter(expr__isnull=False),
self.objs[3:5],
)
def test_has_key(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__has_key="a"),
[self.objs[3], self.objs[4]],
)
def test_has_key_null_value(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key="j"),
[self.objs[4]],
)
def test_has_key_deep(self):
tests = [
(Q(value__baz__has_key="a"), self.objs[7]),
(
Q(value__has_key=KeyTransform("a", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__a")), self.objs[7]),
(
Q(value__has_key=KeyTransform("c", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__c")), self.objs[7]),
(Q(value__d__1__has_key="f"), self.objs[4]),
(
Q(
value__has_key=KeyTransform(
"f", KeyTransform("1", KeyTransform("d", "value"))
)
),
self.objs[4],
),
(Q(value__has_key=F("value__d__1__f")), self.objs[4]),
]
for condition, expected in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[expected],
)
def test_has_key_list(self):
obj = NullableJSONModel.objects.create(value=[{"a": 1}, {"b": "x"}])
tests = [
Q(value__1__has_key="b"),
Q(value__has_key=KeyTransform("b", KeyTransform(1, "value"))),
Q(value__has_key=KeyTransform("b", KeyTransform("1", "value"))),
Q(value__has_key=F("value__1__b")),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
def test_has_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_keys=["a", "c", "h"]),
[self.objs[4]],
)
def test_has_any_keys(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__has_any_keys=["c", "l"]),
[self.objs[3], self.objs[4], self.objs[6]],
)
def test_has_key_number(self):
obj = NullableJSONModel.objects.create(
value={
"123": "value",
"nested": {"456": "bar", "lorem": "abc", "999": True},
"array": [{"789": "baz", "777": "def", "ipsum": 200}],
"000": "val",
}
)
tests = [
Q(value__has_key="123"),
Q(value__nested__has_key="456"),
Q(value__array__0__has_key="789"),
Q(value__has_keys=["nested", "123", "array", "000"]),
Q(value__nested__has_keys=["lorem", "999", "456"]),
Q(value__array__0__has_keys=["789", "ipsum", "777"]),
Q(value__has_any_keys=["000", "nonexistent"]),
Q(value__nested__has_any_keys=["999", "nonexistent"]),
Q(value__array__0__has_any_keys=["777", "nonexistent"]),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains(self):
tests = [
({}, self.objs[2:5] + self.objs[6:8]),
({"baz": {"a": "b", "c": "d"}}, [self.objs[7]]),
({"baz": {"a": "b"}}, [self.objs[7]]),
({"baz": {"c": "d"}}, [self.objs[7]]),
({"k": True, "l": False}, [self.objs[6]]),
({"d": ["e", {"f": "g"}]}, [self.objs[4]]),
({"d": ["e"]}, [self.objs[4]]),
({"d": [{"f": "g"}]}, [self.objs[4]]),
([1, [2]], [self.objs[5]]),
([1], [self.objs[5]]),
([[2]], [self.objs[5]]),
({"n": [None, True, False]}, [self.objs[4]]),
({"j": None}, [self.objs[4]]),
]
for value, expected in tests:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertCountEqual(qs, expected)
@skipIfDBFeature("supports_json_field_contains")
def test_contains_unsupported(self):
msg = "contains lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(
value__contains={"baz": {"a": "b", "c": "d"}},
).get()
@skipUnlessDBFeature(
"supports_primitives_in_json_field",
"supports_json_field_contains",
)
def test_contains_primitives(self):
for value in self.primitives:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertIs(qs.exists(), True)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contained_by(self):
qs = NullableJSONModel.objects.filter(
value__contained_by={"a": "b", "c": 14, "h": True}
)
self.assertCountEqual(qs, self.objs[2:4])
@skipIfDBFeature("supports_json_field_contains")
def test_contained_by_unsupported(self):
msg = "contained_by lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(value__contained_by={"a": "b"}).get()
def test_deep_values(self):
qs = NullableJSONModel.objects.values_list("value__k__l").order_by("pk")
expected_objs = [(None,)] * len(self.objs)
expected_objs[4] = ("m",)
self.assertSequenceEqual(qs, expected_objs)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_deep_distinct(self):
query = NullableJSONModel.objects.distinct("value__k__l").values_list(
"value__k__l"
)
self.assertSequenceEqual(query, [("m",), (None,)])
def test_isnull_key(self):
# key__isnull=False works the same as has_key='key'.
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a__isnull=True),
self.objs[:3] + self.objs[5:],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__j__isnull=True),
self.objs[:4] + self.objs[5:],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a__isnull=False),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j__isnull=False),
[self.objs[4]],
)
def test_isnull_key_or_none(self):
obj = NullableJSONModel.objects.create(value={"a": None})
self.assertCountEqual(
NullableJSONModel.objects.filter(
Q(value__a__isnull=True) | Q(value__a=None)
),
self.objs[:3] + self.objs[5:] + [obj],
)
def test_none_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j=None),
[self.objs[4]],
)
def test_none_key_exclude(self):
obj = NullableJSONModel.objects.create(value={"j": 1})
if connection.vendor == "oracle":
# Oracle supports filtering JSON objects with NULL keys, but the
# current implementation doesn't support it.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None),
self.objs[1:4] + self.objs[5:] + [obj],
)
else:
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None), [obj]
)
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__0=1),
[self.objs[5]],
)
def test_shallow_obj_lookup(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__a="b"),
[self.objs[3], self.objs[4]],
)
def test_obj_subquery_lookup(self):
qs = NullableJSONModel.objects.annotate(
field=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
).filter(field__a="b")
self.assertCountEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k__l="m"),
[self.objs[4]],
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k={"l": "m"}),
[self.objs[4]],
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__1__0=2),
[self.objs[5]],
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__1__f="g"),
[self.objs[4]],
)
def test_deep_lookup_transform(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__c__gt=2),
[self.objs[3], self.objs[4]],
)
self.assertCountEqual(
NullableJSONModel.objects.filter(value__c__gt=2.33),
[self.objs[3], self.objs[4]],
)
self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
def test_lookup_exclude(self):
tests = [
(Q(value__a="b"), [self.objs[0]]),
(Q(value__foo="bax"), [self.objs[0], self.objs[7]]),
]
for condition, expected in tests:
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition),
expected,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(~condition),
expected,
)
def test_lookup_exclude_nonexistent_key(self):
# Values without the key are ignored.
condition = Q(value__foo="bax")
objs_with_value = [self.objs[6]]
objs_with_different_value = [self.objs[0], self.objs[7]]
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition),
objs_with_different_value,
)
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(~condition),
objs_with_value,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(condition | ~condition),
objs_with_value + objs_with_different_value,
)
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & ~condition),
objs_with_value + objs_with_different_value,
)
# Add the __isnull lookup to get an exhaustive set.
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
self.objs[0:6] + self.objs[7:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
objs_with_value,
)
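    # Note on the assertions above: rows missing the "foo" key satisfy
    # neither condition nor ~condition, so only the __isnull variants
    # partition the whole table exhaustively.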
def test_usage_in_subquery(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(
id__in=NullableJSONModel.objects.filter(value__c=14),
),
self.objs[3:5],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_array_key_contains(self):
tests = [
([], [self.objs[7]]),
("bar", [self.objs[7]]),
(["bar"], [self.objs[7]]),
("ar", []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__bar__contains=value),
expected,
)
def test_key_iexact(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact="BaR").exists(), True
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False
)
def test_key_in(self):
tests = [
("value__c__in", [14], self.objs[3:5]),
("value__c__in", [14, 15], self.objs[3:5]),
("value__0__in", [1], [self.objs[5]]),
("value__0__in", [1, 3], [self.objs[5]]),
("value__foo__in", ["bar"], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value"))],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo")], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value")), "baz"],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo"), "baz"], [self.objs[7]]),
("value__foo__in", ["bar", "baz"], [self.objs[7]]),
("value__bar__in", [["foo", "bar"]], [self.objs[7]]),
("value__bar__in", [["foo", "bar"], ["a"]], [self.objs[7]]),
("value__bax__in", [{"foo": "bar"}, {"a": "b"}], [self.objs[7]]),
("value__h__in", [True, "foo"], [self.objs[4]]),
("value__i__in", [False, "foo"], [self.objs[4]]),
]
for lookup, value, expected in tests:
with self.subTest(lookup=lookup, value=value):
self.assertCountEqual(
NullableJSONModel.objects.filter(**{lookup: value}),
expected,
)
def test_key_values(self):
qs = NullableJSONModel.objects.filter(value__h=True)
tests = [
("value__a", "b"),
("value__c", 14),
("value__d", ["e", {"f": "g"}]),
("value__h", True),
("value__i", False),
("value__j", None),
("value__k", {"l": "m"}),
("value__n", [None, True, False]),
("value__p", 4.2),
("value__r", {"s": True, "t": False}),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
def test_key_values_boolean(self):
qs = NullableJSONModel.objects.filter(value__h=True, value__i=False)
tests = [
("value__h", True),
("value__i", False),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertIs(qs.values_list(lookup, flat=True).get(), expected)
@skipUnlessDBFeature("supports_json_field_contains")
def test_key_contains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="ar").exists(), False
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="bar").exists(), True
)
def test_key_icontains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__icontains="Ar").exists(), True
)
def test_key_startswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__startswith="b").exists(), True
)
def test_key_istartswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__istartswith="B").exists(), True
)
def test_key_endswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__endswith="r").exists(), True
)
def test_key_iendswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iendswith="R").exists(), True
)
def test_key_regex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__regex=r"^bar$").exists(), True
)
def test_key_iregex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iregex=r"^bAr$").exists(), True
)
def test_key_quoted_string(self):
self.assertEqual(
NullableJSONModel.objects.filter(value__o='"quoted"').get(),
self.objs[4],
)
@skipUnlessDBFeature("has_json_operators")
def test_key_sql_injection(self):
with CaptureQueriesContext(connection) as queries:
self.assertIs(
NullableJSONModel.objects.filter(
**{
"""value__test' = '"a"') OR 1 = 1 OR ('d""": "x",
}
).exists(),
False,
)
self.assertIn(
"""."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
queries[0]["sql"],
)
@skipIfDBFeature("has_json_operators")
def test_key_sql_injection_escape(self):
query = str(
JSONModel.objects.filter(
**{
"""value__test") = '"a"' OR 1 = 1 OR ("d""": "x",
}
).query
)
self.assertIn('"test\\"', query)
self.assertIn('\\"d', query)
def test_key_escape(self):
obj = NullableJSONModel.objects.create(value={"%total": 10})
self.assertEqual(
NullableJSONModel.objects.filter(**{"value__%total": 10}).get(), obj
)
def test_none_key_and_exact_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a="b", value__j=None),
[self.objs[4]],
)
def test_lookups_with_key_transform(self):
tests = (
("value__baz__has_key", "c"),
("value__baz__has_keys", ["a", "c"]),
("value__baz__has_any_keys", ["a", "x"]),
("value__has_key", KeyTextTransform("foo", "value")),
)
for lookup, value in tests:
with self.subTest(lookup=lookup):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains_contained_by_with_key_transform(self):
tests = [
("value__d__contains", "e"),
("value__d__contains", [{"f": "g"}]),
("value__contains", KeyTransform("bax", "value")),
("value__contains", F("value__bax")),
("value__baz__contains", {"a": "b"}),
("value__baz__contained_by", {"a": "b", "c": "d", "e": "f"}),
(
"value__contained_by",
KeyTransform(
"x",
RawSQL(
self.raw_sql,
['{"x": {"a": "b", "c": 1, "d": "e"}}'],
),
),
),
]
# For databases where {'f': 'g'} (without surrounding []) matches
# [{'f': 'g'}].
if not connection.features.json_key_contains_list_matching_requires_list:
tests.append(("value__d__contains", {"f": "g"}))
for lookup, value in tests:
with self.subTest(lookup=lookup, value=value):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
def test_join_key_transform_annotation_expression(self):
related_obj = RelatedJSONModel.objects.create(
value={"d": ["f", "e"]},
json_model=self.objs[4],
)
RelatedJSONModel.objects.create(
value={"d": ["e", "f"]},
json_model=self.objs[4],
)
self.assertSequenceEqual(
RelatedJSONModel.objects.annotate(
key=F("value__d"),
related_key=F("json_model__value__d"),
chain=F("key__1"),
expr=Cast("key", models.JSONField()),
).filter(chain=F("related_key__0")),
[related_obj],
)
def test_key_text_transform_from_lookup(self):
qs = NullableJSONModel.objects.annotate(b=KT("value__bax__foo")).filter(
b__contains="ar",
)
self.assertSequenceEqual(qs, [self.objs[7]])
qs = NullableJSONModel.objects.annotate(c=KT("value__o")).filter(
c__contains="uot",
)
self.assertSequenceEqual(qs, [self.objs[4]])
def test_key_text_transform_from_lookup_invalid(self):
msg = "Lookup must contain key or index transforms."
with self.assertRaisesMessage(ValueError, msg):
KT("value")
with self.assertRaisesMessage(ValueError, msg):
KT("")
|
bf7e230fdad45f97b0f01fcefcb4deea2a0528e31ef2ec34b2263e1cd5888ddc | import datetime
import re
import sys
from contextlib import contextmanager
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib.auth.models import User
from django.core import serializers
from django.db import connection
from django.db.models import F, Max, Min
from django.http import HttpRequest
from django.template import (
Context,
RequestContext,
Template,
TemplateSyntaxError,
context_processors,
)
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
ignore_warnings,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.urls import reverse
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from datetime import timedelta
from .forms import (
EventForm,
EventLocalizedForm,
EventLocalizedModelForm,
EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent,
DailyEvent,
Event,
MaybeEvent,
Session,
SessionEvent,
Timestamp,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# timezones, which don't have daylight saving time, so we can represent
# them easily with fixed-offset timezones and use them directly as tzinfo
# in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = datetime.timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
ZONE_CONSTRUCTORS = (zoneinfo.ZoneInfo,)
if pytz is not None:
ZONE_CONSTRUCTORS += (pytz.timezone,)
def get_timezones(key):
return [constructor(key) for constructor in ZONE_CONSTRUCTORS]
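# get_timezones() lets a test run against both zoneinfo and (when installed)
# pytz implementations of the same IANA key, e.g.:
#
#     for tz in get_timezones("Africa/Nairobi"):
#         with self.subTest(repr(tz)):
#             ...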
class UTCAliasTests(SimpleTestCase):
def test_alias_deprecation_warning(self):
msg = (
"The django.utils.timezone.utc alias is deprecated. "
"Please update your code to use datetime.timezone.utc instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
timezone.utc
def test_timezone_module_dir_includes_utc(self):
self.assertIn("utc", dir(timezone))
@contextmanager
def override_database_connection_timezone(timezone):
try:
orig_timezone = connection.settings_dict["TIME_ZONE"]
connection.settings_dict["TIME_ZONE"] = timezone
# Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
yield
finally:
connection.settings_dict["TIME_ZONE"] = orig_timezone
# Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
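def _example_override_connection_timezone():
    # Hedged usage sketch (not invoked by the test suite): run a block against
    # a connection configured with a different database TIME_ZONE; the original
    # setting and the cached timezone properties are restored on exit.
    with override_database_connection_timezone("Asia/Bangkok"):
        return connection.timezone_name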
@override_settings(TIME_ZONE="Africa/Nairobi", USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature("supports_timezones")
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature("supports_timezones")
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature("supports_timezones")
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature("supports_timezones")
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature("supports_timezones")
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
msg = "backend does not support timezone-aware datetimes when USE_TZ is False."
with self.assertRaisesMessage(ValueError, msg):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.aggregate(Min("dt"), Max("dt"))
self.assertEqual(
result,
{
"dt__min": datetime.datetime(2011, 9, 1, 3, 20, 40),
"dt__max": datetime.datetime(2011, 9, 1, 23, 20, 20),
},
)
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name="morning")
afternoon = Session.objects.create(name="afternoon")
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon
)
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon
)
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning
)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).order_by("dt"),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt,
)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).filter(
dt__lt=afternoon_min_dt
),
[morning_min_dt],
transform=lambda d: d.dt,
)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).filter(
dt__gte=afternoon_min_dt
),
[afternoon_min_dt],
transform=lambda d: d.dt,
)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertSequenceEqual(
Event.objects.datetimes("dt", "year"),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "month"),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "day"),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "hour"),
[
datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "minute"),
[
datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "second"),
[
datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0),
],
)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertEqual(
list(
Event.objects.raw("SELECT * FROM timezones_event WHERE dt = %s", [dt])
),
[event],
)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute("INSERT INTO timezones_event (dt) VALUES (%s)", [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute("SELECT dt FROM timezones_event WHERE dt = %s", [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE="Africa/Nairobi", USE_TZ=True)
class NewDatabaseTests(TestCase):
naive_warning = "DateTimeField Event.dt received a naive datetime"
@skipIfDBFeature("supports_timezones")
def test_aware_time_unsupported(self):
t = datetime.time(13, 20, 30, tzinfo=EAT)
msg = "backend does not support timezone-aware times."
with self.assertRaisesMessage(ValueError, msg):
DailyEvent.objects.create(time=t)
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
Event.objects.create(dt=dt)
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
Event.objects.create(dt=dt)
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_filter_with_pytz_timezones(self):
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(
Event.objects.filter(dt__in=(prev, dt, next)).count(), 1
)
self.assertEqual(
Event.objects.filter(dt__range=(prev, next)).count(), 1
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_connection_timezone(self):
tests = [
(False, None, datetime.timezone),
(False, "Africa/Nairobi", zoneinfo.ZoneInfo),
]
if pytz is not None:
tests += [
(True, None, datetime.timezone),
(True, "Africa/Nairobi", pytz.BaseTzInfo),
]
for use_pytz, connection_tz, expected_type in tests:
with self.subTest(use_pytz=use_pytz, connection_tz=connection_tz):
with self.settings(USE_DEPRECATED_PYTZ=use_pytz):
with override_database_connection_timezone(connection_tz):
self.assertIsInstance(connection.timezone, expected_type)
def test_query_convert_timezones(self):
        # Connection timezone is equal to the current timezone, so the
        # datetime shouldn't be converted.
with override_database_connection_timezone("Africa/Nairobi"):
event_datetime = datetime.datetime(2016, 1, 2, 23, 10, 11, 123, tzinfo=EAT)
event = Event.objects.create(dt=event_datetime)
self.assertEqual(
Event.objects.filter(dt__date=event_datetime.date()).first(), event
)
        # Connection timezone is not equal to the current timezone, so the
        # datetime should be converted (-4h: Asia/Bangkok is UTC+7, while
        # Africa/Nairobi is UTC+3).
with override_database_connection_timezone("Asia/Bangkok"):
event_datetime = datetime.datetime(2016, 1, 2, 3, 10, 11, tzinfo=ICT)
event = Event.objects.create(dt=event_datetime)
self.assertEqual(
Event.objects.filter(dt__date=datetime.date(2016, 1, 1)).first(), event
)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
# naive datetimes are interpreted in local time
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
with self.assertWarnsMessage(RuntimeWarning, self.naive_warning):
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
@skipUnlessDBFeature("has_zoneinfo_database")
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature("has_zoneinfo_database")
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.aggregate(Min("dt"), Max("dt"))
self.assertEqual(
result,
{
"dt__min": datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
"dt__max": datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
},
)
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name="morning")
afternoon = Session.objects.create(name="afternoon")
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon
)
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon
)
SessionEvent.objects.create(
dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning
)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).order_by("dt"),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt,
)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).filter(
dt__lt=afternoon_min_dt
),
[morning_min_dt],
transform=lambda d: d.dt,
)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min("events__dt")).filter(
dt__gte=afternoon_min_dt
),
[afternoon_min_dt],
transform=lambda d: d.dt,
)
@skipUnlessDBFeature("has_zoneinfo_database")
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertSequenceEqual(
Event.objects.datetimes("dt", "year"),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "month"),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "day"),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "hour"),
[
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "minute"),
[
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "second"),
[
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT),
],
)
@skipUnlessDBFeature("has_zoneinfo_database")
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertSequenceEqual(
Event.objects.datetimes("dt", "year"),
[
datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "month"),
[
datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "day"),
[
datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "hour"),
[
datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "minute"),
[
datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC),
],
)
self.assertSequenceEqual(
Event.objects.datetimes("dt", "second"),
[
datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC),
],
)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertSequenceEqual(
list(
Event.objects.raw("SELECT * FROM timezones_event WHERE dt = %s", [dt])
),
[event],
)
@skipUnlessDBFeature("supports_timezones")
def test_cursor_execute_accepts_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with connection.cursor() as cursor:
cursor.execute("INSERT INTO timezones_event (dt) VALUES (%s)", [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature("supports_timezones")
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, datetime.timezone.utc)
with connection.cursor() as cursor:
cursor.execute(
"INSERT INTO timezones_event (dt) VALUES (%s)", [utc_naive_dt]
)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature("supports_timezones")
def test_cursor_execute_returns_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute("SELECT dt FROM timezones_event WHERE dt = %s", [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
@skipIfDBFeature("supports_timezones")
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, datetime.timezone.utc)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute(
"SELECT dt FROM timezones_event WHERE dt = %s", [utc_naive_dt]
)
self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)
@skipUnlessDBFeature("supports_timezones")
def test_cursor_explicit_time_zone(self):
with override_database_connection_timezone("Europe/Paris"):
with connection.cursor() as cursor:
cursor.execute("SELECT CURRENT_TIMESTAMP")
now = cursor.fetchone()[0]
self.assertEqual(str(now.tzinfo), "Europe/Paris")
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertIsNone(e.dt)
def test_update_with_timedelta(self):
initial_dt = timezone.now().replace(microsecond=0)
event = Event.objects.create(dt=initial_dt)
Event.objects.update(dt=F("dt") + timedelta(hours=2))
event.refresh_from_db()
self.assertEqual(event.dt, initial_dt + timedelta(hours=2))
@override_settings(TIME_ZONE="Africa/Nairobi", USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
"""
Test the TIME_ZONE database configuration parameter.
Since this involves reading and writing to the same database through two
connections, this is a TransactionTestCase.
"""
available_apps = ["timezones"]
@classmethod
def setUpClass(cls):
# @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
# outermost takes precedence. Handle skipping manually instead.
if connection.features.supports_timezones:
raise SkipTest("Database has feature(s) supports_timezones")
if not connection.features.test_db_allows_multiple_connections:
raise SkipTest(
"Database doesn't support feature(s): "
"test_db_allows_multiple_connections"
)
super().setUpClass()
def test_read_datetime(self):
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
Event.objects.create(dt=fake_dt)
with override_database_connection_timezone("Asia/Bangkok"):
event = Event.objects.get()
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, dt)
def test_write_datetime(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
with override_database_connection_timezone("Asia/Bangkok"):
Event.objects.create(dt=dt)
event = Event.objects.get()
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, fake_dt)
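    def _example_time_zone_offset(self):
        # Hedged sketch (not part of the original tests): with USE_TZ=True and
        # a backend lacking timezone support, values are stored naive in the
        # connection's TIME_ZONE. Reading 17:20:30 stored under Asia/Bangkok
        # (UTC+7) therefore yields 10:20:30 UTC, matching test_read_datetime.
        stored = datetime.datetime(2011, 9, 1, 17, 20, 30)
        bangkok = timezone.get_fixed_timezone(420)
        return stored.replace(tzinfo=bangkok).astimezone(UTC)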
@override_settings(TIME_ZONE="Africa/Nairobi")
class SerializationTests(SimpleTestCase):
# Backend-specific notes:
    # - JSON supports only milliseconds; microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes.
# When PyYAML < 5.3 loads this representation, it subtracts the offset
# and returns a naive datetime object in UTC. PyYAML 5.3+ loads timezones
# correctly.
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]["fields"]["dt"], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName("field")[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
self.assertRegex(yaml, r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
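    def _example_serializer_precision(self):
        # Hedged sketch (not part of the original tests): DjangoJSONEncoder
        # keeps only millisecond precision for datetimes, which is why the
        # JSON assertions below expect ".405" rather than ".405060". (PyYAML,
        # by contrast, round-trips full microseconds, as noted above.)
        import json
        from django.core.serializers.json import DjangoJSONEncoder

        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        return json.dumps({"dt": dt}, cls=DjangoJSONEncoder)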
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize("yaml", data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize("python", [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize("python", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("json", [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize("json", data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize("xml", [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize("xml", data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(
serializers.get_serializer("yaml"), serializers.BadSerializer
):
data = serializers.serialize(
"yaml", [Event(dt=dt)], default_flow_style=None
)
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize("yaml", data)).object
if HAS_YAML and yaml.__version__ < "5.3":
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
else:
self.assertEqual(obj.dt, dt)
# RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and
# USE_L10N=False. The tests should remain because format-related settings will
# take precedence over locale-dictated formats.
@override_settings(
DATETIME_FORMAT="c", TIME_ZONE="Africa/Nairobi", USE_L10N=False, USE_TZ=True
)
class TemplateTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
with ignore_warnings(category=RemovedInDjango50Warning):
super().setUpClass()
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
"utc": datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
"eat": datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
"ict": datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
"naive": datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
"notag": Template(
"{% load tz %}"
"{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"
),
"noarg": Template(
"{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
"on": Template(
"{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
"off": Template(
"{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return "|".join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
"utc": {
"notag": t("eat", "eat", "utc", "ict"),
"noarg": t("eat", "eat", "utc", "ict"),
"on": t("eat", "eat", "utc", "ict"),
"off": t("utc", "eat", "utc", "ict"),
},
"eat": {
"notag": t("eat", "eat", "utc", "ict"),
"noarg": t("eat", "eat", "utc", "ict"),
"on": t("eat", "eat", "utc", "ict"),
"off": t("eat", "eat", "utc", "ict"),
},
"ict": {
"notag": t("eat", "eat", "utc", "ict"),
"noarg": t("eat", "eat", "utc", "ict"),
"on": t("eat", "eat", "utc", "ict"),
"off": t("ict", "eat", "utc", "ict"),
},
"naive": {
"notag": t("naive", "eat", "utc", "ict"),
"noarg": t("naive", "eat", "utc", "ict"),
"on": t("naive", "eat", "utc", "ict"),
"off": t("naive", "eat", "utc", "ict"),
},
}
for k1, dt in datetimes.items():
for k2, tpl in templates.items():
ctx = Context({"dt": dt, "ICT": ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(
actual, expected, "%s / %s: %r != %r" % (k1, k2, actual, expected)
)
# Changes for USE_TZ = False
results["utc"]["notag"] = t("utc", "eat", "utc", "ict")
results["ict"]["notag"] = t("ict", "eat", "utc", "ict")
with self.settings(USE_TZ=False):
for k1, dt in datetimes.items():
for k2, tpl in templates.items():
ctx = Context({"dt": dt, "ICT": ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(
actual,
expected,
"%s / %s: %r != %r" % (k1, k2, actual, expected),
)
def test_localtime_filters_with_iana(self):
"""
        Test the |localtime, |utc, and |timezone filters with IANA zones.
"""
# Use an IANA timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({"dt": datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE="Europe/Paris"):
self.assertEqual(
tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00"
)
# Use an IANA timezone as argument
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context(
{
"dt": datetime.datetime(2011, 9, 1, 13, 20, 30),
"tz": tz,
}
)
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template(
"{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}"
)
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({"dt": None, "tz": ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({"dt": "not a date", "tz": ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({"dt": datetime.datetime(2011, 9, 1, 13, 20, 30), "tz": None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context(
{"dt": datetime.datetime(2011, 9, 1, 13, 20, 30), "tz": "not a tz"}
)
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context(
{
"dt": datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
"tz1": ICT,
"tz2": None,
}
)
self.assertEqual(
tpl.render(ctx),
"2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|"
"2011-09-01T13:20:30+03:00",
)
def test_timezone_templatetag_with_iana(self):
"""
Test the {% timezone %} templatetag with IANA time zone providers.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
        # Use an IANA timezone as an argument
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
ctx = Context(
{
"dt": datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
"tz": tz,
}
)
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
        # Use an IANA timezone name as an argument
ctx = Context(
{
"dt": datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
"tz": "Europe/Paris",
}
)
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(zoneinfo.ZoneInfoNotFoundError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(
Context({"tz": "foobar"})
)
if pytz is not None:
with override_settings(USE_DEPRECATED_PYTZ=True), self.assertRaises(
pytz.UnknownTimeZoneError
):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(
Context({"tz": "foobar"})
)
@skipIf(sys.platform == "win32", "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}"
)
self.assertEqual(tpl.render(Context()), "Africa/Nairobi")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template(
"{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}"
"{% endtimezone %}{{ time_zone }}"
)
self.assertEqual(tpl.render(Context({"tz": ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({"tz": ICT})), "+0700")
def test_get_current_timezone_templatetag_with_iana(self):
"""
        Test the {% get_current_timezone %} templatetag with IANA time
        zone providers.
"""
tpl = Template(
"{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}"
)
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
with timezone.override(tz):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template(
"{% load tz %}{% timezone 'Europe/Paris' %}"
"{% get_current_timezone as time_zone %}{% endtimezone %}"
"{{ time_zone }}"
)
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
msg = (
"'get_current_timezone' requires 'as variable' (got "
"['get_current_timezone'])"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform == "win32", "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(
HttpRequest(), processors=[context_processors.tz]
)
self.assertEqual(tpl.render(request_context), "Africa/Nairobi")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({"dt": datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template(
"{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at "
"{{ dt|time:'H:i:s' }}{% endlocaltime %}"
)
ctx = Context({"dt": datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template('{% now "O" %}')
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
# RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and
# USE_L10N=False. The tests should remain because format-related settings will
# take precedence over locale-dictated formats.
@override_settings(
DATETIME_FORMAT="c", TIME_ZONE="Africa/Nairobi", USE_L10N=False, USE_TZ=False
)
class LegacyFormsTests(TestCase):
@classmethod
def setUpClass(cls):
with ignore_warnings(category=RemovedInDjango50Warning):
super().setUpClass()
def test_form(self):
form = EventForm({"dt": "2011-09-01 13:20:30"})
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"], datetime.datetime(2011, 9, 1, 13, 20, 30)
)
def test_form_with_non_existent_time(self):
form = EventForm({"dt": "2011-03-27 02:30:00"})
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
with timezone.override(tz):
# This is a bug.
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"],
datetime.datetime(2011, 3, 27, 2, 30, 0),
)
def test_form_with_ambiguous_time(self):
form = EventForm({"dt": "2011-10-30 02:30:00"})
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
with timezone.override(tz):
# This is a bug.
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"],
datetime.datetime(2011, 10, 30, 2, 30, 0),
)
def test_split_form(self):
form = EventSplitForm({"dt_0": "2011-09-01", "dt_1": "13:20:30"})
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"], datetime.datetime(2011, 9, 1, 13, 20, 30)
)
def test_model_form(self):
EventModelForm({"dt": "2011-09-01 13:20:30"}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
# RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and
# USE_L10N=False. The tests should remain because format-related settings will
# take precedence over locale-dictated formats.
@override_settings(
DATETIME_FORMAT="c", TIME_ZONE="Africa/Nairobi", USE_L10N=False, USE_TZ=True
)
class NewFormsTests(TestCase):
@classmethod
def setUpClass(cls):
with ignore_warnings(category=RemovedInDjango50Warning):
super().setUpClass()
@requires_tz_support
def test_form(self):
form = EventForm({"dt": "2011-09-01 13:20:30"})
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"],
datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
)
def test_form_with_other_timezone(self):
form = EventForm({"dt": "2011-09-01 17:20:30"})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"],
datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
)
def test_form_with_non_existent_time(self):
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
with timezone.override(tz):
form = EventForm({"dt": "2011-03-27 02:30:00"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["dt"],
[
"2011-03-27 02:30:00 couldn’t be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."
],
)
def test_form_with_ambiguous_time(self):
for tz in get_timezones("Europe/Paris"):
with self.subTest(repr(tz)):
with timezone.override(tz):
form = EventForm({"dt": "2011-10-30 02:30:00"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["dt"],
[
"2011-10-30 02:30:00 couldn’t be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."
],
)
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({"dt_0": "2011-09-01", "dt_1": "13:20:30"})
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["dt"],
datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
)
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(
initial={"dt": datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)}
)
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({"dt": "2011-09-01 13:20:30"}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(
instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
)
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(
DATETIME_FORMAT="c",
TIME_ZONE="Africa/Nairobi",
USE_L10N=False,
USE_TZ=True,
ROOT_URLCONF="timezones.urls",
)
class AdminTests(TestCase):
@classmethod
def setUpClass(cls):
with ignore_warnings(category=RemovedInDjango50Warning):
super().setUpClass()
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(
password="secret",
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
is_superuser=True,
username="super",
first_name="Super",
last_name="User",
email="[email protected]",
is_staff=True,
is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
)
def setUp(self):
self.client.force_login(self.u1)
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(
dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
response = self.client.get(reverse("admin_tz:timezones_event_changelist"))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(
dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
with timezone.override(ICT):
response = self.client.get(reverse("admin_tz:timezones_event_changelist"))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(
dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
response = self.client.get(
reverse("admin_tz:timezones_event_change", args=(e.pk,))
)
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(
dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
with timezone.override(ICT):
response = self.client.get(
reverse("admin_tz:timezones_event_change", args=(e.pk,))
)
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
t = Timestamp.objects.create()
response = self.client.get(
reverse("admin_tz:timezones_timestamp_change", args=(t.pk,))
)
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
t = Timestamp.objects.create()
with timezone.override(ICT):
response = self.client.get(
reverse("admin_tz:timezones_timestamp_change", args=(t.pk,))
)
self.assertContains(response, t.created.astimezone(ICT).isoformat())
|
96d04d795ae6a0a0244c874b7fd85802c09763ae74e5c6fda4baa58b4b92c8ec | import collections.abc
from datetime import datetime
from math import ceil
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import (
BooleanField,
Case,
Exists,
ExpressionWrapper,
F,
Max,
OuterRef,
Q,
Subquery,
Value,
When,
)
from django.db.models.functions import Cast, Substr
from django.db.models.lookups import (
Exact,
GreaterThan,
GreaterThanOrEqual,
IsNull,
LessThan,
LessThanOrEqual,
)
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import (
Article,
Author,
Freebie,
Game,
IsNullWithNoneAsRHS,
Player,
Product,
Season,
Stock,
Tag,
)
class LookupTests(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Authors.
cls.au1 = Author.objects.create(name="Author 1", alias="a1")
cls.au2 = Author.objects.create(name="Author 2", alias="a2")
# Create a few Articles.
cls.a1 = Article.objects.create(
headline="Article 1",
pub_date=datetime(2005, 7, 26),
author=cls.au1,
slug="a1",
)
cls.a2 = Article.objects.create(
headline="Article 2",
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug="a2",
)
cls.a3 = Article.objects.create(
headline="Article 3",
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug="a3",
)
cls.a4 = Article.objects.create(
headline="Article 4",
pub_date=datetime(2005, 7, 28),
author=cls.au1,
slug="a4",
)
cls.a5 = Article.objects.create(
headline="Article 5",
pub_date=datetime(2005, 8, 1, 9, 0),
author=cls.au2,
slug="a5",
)
cls.a6 = Article.objects.create(
headline="Article 6",
pub_date=datetime(2005, 8, 1, 8, 0),
author=cls.au2,
slug="a6",
)
cls.a7 = Article.objects.create(
headline="Article 7",
pub_date=datetime(2005, 7, 27),
author=cls.au2,
slug="a7",
)
# Create a few Tags.
cls.t1 = Tag.objects.create(name="Tag 1")
cls.t1.articles.add(cls.a1, cls.a2, cls.a3)
cls.t2 = Tag.objects.create(name="Tag 2")
cls.t2.articles.add(cls.a3, cls.a4, cls.a5)
cls.t3 = Tag.objects.create(name="Tag 3")
cls.t3.articles.add(cls.a5, cls.a6, cls.a7)
def test_exists(self):
        # We can use .exists() to check that there are some results.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertSequenceEqual(
Article.objects.filter(id__iexact=str(self.a1.id)),
[self.a1],
)
@skipUnlessDBFeature("supports_date_lookup_using_string")
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertSequenceEqual(
Article.objects.filter(pub_date__startswith="2005"),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
"Article 5",
"Article 6",
"Article 4",
"Article 2",
"Article 3",
"Article 7",
"Article 1",
],
transform=attrgetter("headline"),
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith="4").iterator(),
["Article 4"],
transform=attrgetter("headline"),
)
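    def _example_iterator_chunking(self):
        # Hedged sketch (not part of the original tests): iterator() streams
        # rows from a server-side cursor where the backend supports it instead
        # of caching the whole result set; chunk_size (default 2000) controls
        # the fetch batch size.
        return [a.headline for a in Article.objects.iterator(chunk_size=100)]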
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3
)
self.assertEqual(
Article.objects.filter(headline__startswith="Blah blah").count(), 0
)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(
Article.objects.filter(pub_date__exact="2005-07-27 00:00:00").count(), 3
)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
},
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(
Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}
)
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(
Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}
)
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith="Blah")
    def test_in_bulk_lots_of_ids(self):
        test_range = 2000
        max_query_params = connection.features.max_query_params
        # in_bulk() batches the ids when the backend caps the number of query
        # parameters, so ceil(test_range / max_query_params) queries are
        # expected.
        expected_num_queries = (
            ceil(test_range / max_query_params) if max_query_params else 1
        )
Author.objects.bulk_create(
[Author() for i in range(test_range - Author.objects.count())]
)
authors = {author.pk: author for author in Author.objects.all()}
with self.assertNumQueries(expected_num_queries):
self.assertEqual(Author.objects.in_bulk(authors), authors)
def test_in_bulk_with_field(self):
self.assertEqual(
Article.objects.in_bulk(
[self.a1.slug, self.a2.slug, self.a3.slug], field_name="slug"
),
{
self.a1.slug: self.a1,
self.a2.slug: self.a2,
self.a3.slug: self.a3,
},
)
def test_in_bulk_meta_constraint(self):
season_2011 = Season.objects.create(year=2011)
season_2012 = Season.objects.create(year=2012)
Season.objects.create(year=2013)
self.assertEqual(
Season.objects.in_bulk(
[season_2011.year, season_2012.year],
field_name="year",
),
{season_2011.year: season_2011, season_2012.year: season_2012},
)
def test_in_bulk_non_unique_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'author' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.in_bulk([self.au1], field_name="author")
@skipUnlessDBFeature("can_distinct_on_fields")
def test_in_bulk_distinct_field(self):
self.assertEqual(
Article.objects.order_by("headline")
.distinct("headline")
.in_bulk(
[self.a1.headline, self.a5.headline],
field_name="headline",
),
{self.a1.headline: self.a1, self.a5.headline: self.a5},
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_in_bulk_multiple_distinct_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'pub_date' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by("headline", "pub_date").distinct(
"headline",
"pub_date",
).in_bulk(field_name="pub_date")
@isolate_apps("lookup")
    def test_in_bulk_non_unique_meta_constraint(self):
class Model(models.Model):
ean = models.CharField(max_length=100)
brand = models.CharField(max_length=100)
name = models.CharField(max_length=80)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["ean"],
name="partial_ean_unique",
condition=models.Q(is_active=True),
),
models.UniqueConstraint(
fields=["brand", "name"],
name="together_brand_name_unique",
),
]
msg = "in_bulk()'s field_name must be a unique field but '%s' isn't."
for field_name in ["brand", "ean"]:
with self.subTest(field_name=field_name):
with self.assertRaisesMessage(ValueError, msg % field_name):
Model.objects.in_bulk(field_name=field_name)
def test_in_bulk_sliced_queryset(self):
msg = "Cannot use 'limit' or 'offset' with in_bulk()."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].in_bulk([self.a1.id, self.a2.id])
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
self.assertSequenceEqual(
Article.objects.values("headline"),
[
{"headline": "Article 5"},
{"headline": "Article 6"},
{"headline": "Article 4"},
{"headline": "Article 2"},
{"headline": "Article 3"},
{"headline": "Article 7"},
{"headline": "Article 1"},
],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values("id"),
[{"id": self.a2.id}, {"id": self.a3.id}, {"id": self.a7.id}],
)
self.assertSequenceEqual(
Article.objects.values("id", "headline"),
[
{"id": self.a5.id, "headline": "Article 5"},
{"id": self.a6.id, "headline": "Article 6"},
{"id": self.a4.id, "headline": "Article 4"},
{"id": self.a2.id, "headline": "Article 2"},
{"id": self.a3.id, "headline": "Article 3"},
{"id": self.a7.id, "headline": "Article 7"},
{"id": self.a1.id, "headline": "Article 1"},
],
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertSequenceEqual(
list(Article.objects.values("id", "headline").iterator()),
[
{"headline": "Article 5", "id": self.a5.id},
{"headline": "Article 6", "id": self.a6.id},
{"headline": "Article 4", "id": self.a4.id},
{"headline": "Article 2", "id": self.a2.id},
{"headline": "Article 3", "id": self.a3.id},
{"headline": "Article 7", "id": self.a7.id},
{"headline": "Article 1", "id": self.a1.id},
],
)
# The values() method works with "extra" fields specified in extra(select).
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id + 1"}).values(
"id", "id_plus_one"
),
[
{"id": self.a5.id, "id_plus_one": self.a5.id + 1},
{"id": self.a6.id, "id_plus_one": self.a6.id + 1},
{"id": self.a4.id, "id_plus_one": self.a4.id + 1},
{"id": self.a2.id, "id_plus_one": self.a2.id + 1},
{"id": self.a3.id, "id_plus_one": self.a3.id + 1},
{"id": self.a7.id, "id_plus_one": self.a7.id + 1},
{"id": self.a1.id, "id_plus_one": self.a1.id + 1},
],
)
data = {
"id_plus_one": "id+1",
"id_plus_two": "id+2",
"id_plus_three": "id+3",
"id_plus_four": "id+4",
"id_plus_five": "id+5",
"id_plus_six": "id+6",
"id_plus_seven": "id+7",
"id_plus_eight": "id+8",
}
self.assertSequenceEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data),
[
{
"id_plus_one": self.a1.id + 1,
"id_plus_two": self.a1.id + 2,
"id_plus_three": self.a1.id + 3,
"id_plus_four": self.a1.id + 4,
"id_plus_five": self.a1.id + 5,
"id_plus_six": self.a1.id + 6,
"id_plus_seven": self.a1.id + 7,
"id_plus_eight": self.a1.id + 8,
}
],
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertSequenceEqual(
Article.objects.values("headline", "author__name"),
[
{"headline": self.a5.headline, "author__name": self.au2.name},
{"headline": self.a6.headline, "author__name": self.au2.name},
{"headline": self.a4.headline, "author__name": self.au1.name},
{"headline": self.a2.headline, "author__name": self.au1.name},
{"headline": self.a3.headline, "author__name": self.au1.name},
{"headline": self.a7.headline, "author__name": self.au2.name},
{"headline": self.a1.headline, "author__name": self.au1.name},
],
)
self.assertSequenceEqual(
Author.objects.values("name", "article__headline").order_by(
"name", "article__headline"
),
[
{"name": self.au1.name, "article__headline": self.a1.headline},
{"name": self.au1.name, "article__headline": self.a2.headline},
{"name": self.au1.name, "article__headline": self.a3.headline},
{"name": self.au1.name, "article__headline": self.a4.headline},
{"name": self.au2.name, "article__headline": self.a5.headline},
{"name": self.au2.name, "article__headline": self.a6.headline},
{"name": self.au2.name, "article__headline": self.a7.headline},
],
)
self.assertSequenceEqual(
(
Author.objects.values(
"name", "article__headline", "article__tag__name"
).order_by("name", "article__headline", "article__tag__name")
),
[
{
"name": self.au1.name,
"article__headline": self.a1.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a2.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a3.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a3.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au1.name,
"article__headline": self.a4.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au2.name,
"article__headline": self.a5.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au2.name,
"article__headline": self.a5.headline,
"article__tag__name": self.t3.name,
},
{
"name": self.au2.name,
"article__headline": self.a6.headline,
"article__tag__name": self.t3.name,
},
{
"name": self.au2.name,
"article__headline": self.a7.headline,
"article__tag__name": self.t3.name,
},
],
)
        # However, FieldError will be raised if you specify a nonexistent
        # field name in values() (a field that is neither in the model nor
        # in extra(select)).
msg = (
"Cannot resolve keyword 'id_plus_two' into field. Choices are: "
"author, author_id, headline, id, id_plus_one, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.extra(select={"id_plus_one": "id + 1"}).values(
"id", "id_plus_two"
)
# If you don't specify field names to values(), all are returned.
self.assertSequenceEqual(
Article.objects.filter(id=self.a5.id).values(),
[
{
"id": self.a5.id,
"author_id": self.au2.id,
"headline": "Article 5",
"pub_date": datetime(2005, 8, 1, 9, 0),
"slug": "a5",
}
],
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
self.assertSequenceEqual(
Article.objects.values_list("headline"),
[
("Article 5",),
("Article 6",),
("Article 4",),
("Article 2",),
("Article 3",),
("Article 7",),
("Article 1",),
],
)
self.assertSequenceEqual(
Article.objects.values_list("id").order_by("id"),
[
(self.a1.id,),
(self.a2.id,),
(self.a3.id,),
(self.a4.id,),
(self.a5.id,),
(self.a6.id,),
(self.a7.id,),
],
)
self.assertSequenceEqual(
Article.objects.values_list("id", flat=True).order_by("id"),
[
self.a1.id,
self.a2.id,
self.a3.id,
self.a4.id,
self.a5.id,
self.a6.id,
self.a7.id,
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id"),
[
(self.a1.id,),
(self.a2.id,),
(self.a3.id,),
(self.a4.id,),
(self.a5.id,),
(self.a6.id,),
(self.a7.id,),
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id_plus_one", "id"),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id),
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id", "id_plus_one"),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1),
],
)
args = ("name", "article__headline", "article__tag__name")
self.assertSequenceEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
],
)
with self.assertRaises(TypeError):
Article.objects.values_list("id", "headline", flat=True)
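    # A hedged sketch (not in the original suite): values_list() also
    # accepts named=True, which returns rows as namedtuples addressable
    # by field name.
    def test_values_list_named_sketch(self):
        row = Article.objects.values_list("id", "headline", named=True).get(
            id=self.a1.id
        )
        self.assertEqual(row.headline, "Article 1")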
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), "<Article: Article 2>")
self.assertEqual(repr(self.a2.get_next_by_pub_date()), "<Article: Article 3>")
self.assertEqual(
repr(self.a2.get_next_by_pub_date(headline__endswith="6")),
"<Article: Article 6>",
)
self.assertEqual(repr(self.a3.get_next_by_pub_date()), "<Article: Article 7>")
self.assertEqual(repr(self.a4.get_next_by_pub_date()), "<Article: Article 6>")
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), "<Article: Article 5>")
self.assertEqual(repr(self.a7.get_next_by_pub_date()), "<Article: Article 4>")
self.assertEqual(
repr(self.a7.get_previous_by_pub_date()), "<Article: Article 3>"
)
self.assertEqual(
repr(self.a6.get_previous_by_pub_date()), "<Article: Article 4>"
)
self.assertEqual(
repr(self.a5.get_previous_by_pub_date()), "<Article: Article 6>"
)
self.assertEqual(
repr(self.a4.get_previous_by_pub_date()), "<Article: Article 7>"
)
self.assertEqual(
repr(self.a3.get_previous_by_pub_date()), "<Article: Article 2>"
)
self.assertEqual(
repr(self.a2.get_previous_by_pub_date()), "<Article: Article 1>"
)
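    # Hedged sketch mirroring the assertions above: the earliest article
    # has no predecessor, so get_previous_by_FOO() raises DoesNotExist,
    # just as get_next_by_FOO() does for the latest one.
    def test_get_previous_by_pub_date_earliest_sketch(self):
        with self.assertRaises(Article.DoesNotExist):
            self.a1.get_previous_by_pub_date()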
def test_escaping(self):
        # Underscores, percent signs and backslashes have special meaning in
        # the underlying SQL code, but Django escapes them automatically.
a8 = Article.objects.create(
headline="Article_ with underscore", pub_date=datetime(2005, 11, 20)
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article"),
[a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article_"),
[a8],
)
a9 = Article.objects.create(
headline="Article% with percent sign", pub_date=datetime(2005, 11, 21)
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article"),
[a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article%"),
[a9],
)
a10 = Article.objects.create(
headline="Article with \\ backslash", pub_date=datetime(2005, 11, 22)
)
self.assertSequenceEqual(
Article.objects.filter(headline__contains="\\"),
[a10],
)
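    # Hedged sketch: the escaping above only matters for pattern lookups
    # (startswith, contains, ...); an exact match compares the literal
    # string, so "%" and "_" need no special handling.
    def test_escaping_exact_match_sketch(self):
        a = Article.objects.create(
            headline="100% organic", pub_date=datetime(2005, 11, 23)
        )
        self.assertSequenceEqual(
            Article.objects.filter(headline__exact="100% organic"), [a]
        )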
def test_exclude(self):
pub_date = datetime(2005, 11, 20)
a8 = Article.objects.create(
headline="Article_ with underscore", pub_date=pub_date
)
a9 = Article.objects.create(
headline="Article% with percent sign", pub_date=pub_date
)
a10 = Article.objects.create(
headline="Article with \\ backslash", pub_date=pub_date
)
# exclude() is the opposite of filter() when doing lookups:
self.assertSequenceEqual(
Article.objects.filter(headline__contains="Article").exclude(
headline__contains="with"
),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline__startswith="Article_"),
[a10, a9, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline="Article 7"),
[a10, a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a1],
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith="Article"), []
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith="Article").none(), []
)
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0
)
self.assertQuerysetEqual(Article.objects.none().iterator(), [])
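    # Hedged sketch: none() short-circuits at the SQL-compilation level,
    # so exists() returns False without issuing a database query.
    def test_none_exists_sketch(self):
        with self.assertNumQueries(0):
            self.assertIs(Article.objects.none().exists(), False)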
def test_in(self):
self.assertSequenceEqual(
Article.objects.exclude(id__in=[]),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_in_empty_list(self):
self.assertSequenceEqual(Article.objects.filter(id__in=[]), [])
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`.",
):
list(Article.objects.filter(id__in=Article.objects.using("other").all()))
def test_in_keeps_value_ordering(self):
query = (
Article.objects.filter(slug__in=["a%d" % i for i in range(1, 8)])
.values("pk")
.query
)
self.assertIn(" IN (a1, a2, a3, a4, a5, a6, a7) ", str(query))
def test_in_ignore_none(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, self.a1.id]),
[self.a1],
)
sql = ctx.captured_queries[0]["sql"]
self.assertIn("IN (%s)" % self.a1.pk, sql)
def test_in_ignore_solo_none(self):
with self.assertNumQueries(0):
self.assertSequenceEqual(Article.objects.filter(id__in=[None]), [])
def test_in_ignore_none_with_unhashable_items(self):
class UnhashableInt(int):
__hash__ = None
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, UnhashableInt(self.a1.id)]),
[self.a1],
)
sql = ctx.captured_queries[0]["sql"]
self.assertIn("IN (%s)" % self.a1.pk, sql)
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag",
):
Article.objects.filter(pub_date_year="2005").count()
def test_unsupported_lookups(self):
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted, perhaps you meant startswith or istartswith?",
):
Article.objects.filter(headline__starts="Article")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'is_null' for DateTimeField or join on the field "
"not permitted, perhaps you meant isnull?",
):
Article.objects.filter(pub_date__is_null=True)
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gobbledygook' for DateTimeField or join on the field "
"not permitted.",
):
Article.objects.filter(pub_date__gobbledygook="blahblah")
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = (
"Unsupported lookup 'editor' for ForeignKey or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name="James")
msg = (
"Unsupported lookup 'foo' for ForeignKey or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(articles__foo="bar")
def test_regex(self):
# Create some articles with a bit more interesting headlines for
# testing field lookups.
Article.objects.all().delete()
now = datetime.now()
Article.objects.bulk_create(
[
Article(pub_date=now, headline="f"),
Article(pub_date=now, headline="fo"),
Article(pub_date=now, headline="foo"),
Article(pub_date=now, headline="fooo"),
Article(pub_date=now, headline="hey-Foo"),
Article(pub_date=now, headline="bar"),
Article(pub_date=now, headline="AbBa"),
Article(pub_date=now, headline="baz"),
Article(pub_date=now, headline="baxZ"),
]
)
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"fo*"),
Article.objects.filter(headline__in=["f", "fo", "foo", "fooo"]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"fo*"),
Article.objects.filter(headline__in=["f", "fo", "foo", "fooo", "hey-Foo"]),
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"fo+"),
Article.objects.filter(headline__in=["fo", "foo", "fooo"]),
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"fooo?"),
Article.objects.filter(headline__in=["foo", "fooo"]),
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"^b"),
Article.objects.filter(headline__in=["bar", "baxZ", "baz"]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"^a"),
Article.objects.filter(headline="AbBa"),
)
# trailing anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"z$"),
Article.objects.filter(headline="baz"),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"z$"),
Article.objects.filter(headline__in=["baxZ", "baz"]),
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"ba[rz]"),
Article.objects.filter(headline__in=["bar", "baz"]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"ba.[RxZ]"),
Article.objects.filter(headline="baxZ"),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"ba[RxZ]"),
Article.objects.filter(headline__in=["bar", "baxZ", "baz"]),
)
# and more articles:
Article.objects.bulk_create(
[
Article(pub_date=now, headline="foobar"),
Article(pub_date=now, headline="foobaz"),
Article(pub_date=now, headline="ooF"),
Article(pub_date=now, headline="foobarbaz"),
Article(pub_date=now, headline="zoocarfaz"),
Article(pub_date=now, headline="barfoobaz"),
Article(pub_date=now, headline="bazbaRFOO"),
]
)
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"oo(f|b)"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"foobar",
"foobarbaz",
"foobaz",
]
),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"oo(f|b)"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"foobar",
"foobarbaz",
"foobaz",
"ooF",
]
),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"^foo(f|b)"),
Article.objects.filter(headline__in=["foobar", "foobarbaz", "foobaz"]),
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"b.*az"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"baz",
"bazbaRFOO",
"foobarbaz",
"foobaz",
]
),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r"b.*ar"),
Article.objects.filter(
headline__in=[
"bar",
"barfoobaz",
"bazbaRFOO",
"foobar",
"foobarbaz",
]
),
)
@skipUnlessDBFeature("supports_regex_backreferencing")
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.bulk_create(
[
Article(pub_date=now, headline="foobar"),
Article(pub_date=now, headline="foobaz"),
Article(pub_date=now, headline="ooF"),
Article(pub_date=now, headline="foobarbaz"),
Article(pub_date=now, headline="zoocarfaz"),
Article(pub_date=now, headline="barfoobaz"),
Article(pub_date=now, headline="bazbaRFOO"),
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r"b(.).*b\1").values_list(
"headline", flat=True
),
["barfoobaz", "bazbaRFOO", "foobarbaz"],
)
def test_regex_null(self):
"""
A regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r"^$"), [])
def test_regex_non_string(self):
"""
A regex lookup does not fail on non-string fields
"""
s = Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r"^444$"), [s])
def test_regex_non_ascii(self):
"""
A regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name="\u2660")
Player.objects.get(name__regex="\u2660")
def test_nonfield_lookups(self):
"""
A lookup query containing non-fields raises the proper exception.
"""
msg = (
"Unsupported lookup 'blahblah' for CharField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah=99)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah__exact=99)
msg = (
"Cannot resolve keyword 'blahblah' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Genuine field names don't collide with built-in lookup types
('year', 'gt', 'range', 'in' etc.) (#11670).
"""
# 'gt' is used as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
        pudge = Player.objects.create(name="Ivan Rodriguez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(
Player.objects.filter(games__season__year=2009).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt=111).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2
)
# Players who played in 2010
self.assertEqual(
Player.objects.filter(games__season__year=2010).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__gt=222).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1
)
# Players who played in 2011
self.assertEqual(
Player.objects.filter(games__season__year=2011).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt=333).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2
)
def test_chain_date_time_lookups(self):
self.assertCountEqual(
Article.objects.filter(pub_date__month__gt=7),
[self.a5, self.a6],
)
self.assertCountEqual(
Article.objects.filter(pub_date__day__gte=27),
[self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__hour__lt=8),
[self.a1, self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__minute__lte=0),
[self.a1, self.a2, self.a3, self.a4, self.a5, self.a6, self.a7],
)
def test_exact_none_transform(self):
"""Transforms are used for __exact=None."""
Season.objects.create(year=1, nulled_text_field="not null")
self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None))
def test_exact_sliced_queryset_limit_one(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[:1]),
[self.a1, self.a2, self.a3, self.a4],
)
def test_exact_sliced_queryset_limit_one_offset(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[1:2]),
[self.a5, self.a6, self.a7],
)
def test_exact_sliced_queryset_not_limited_to_one(self):
msg = (
"The QuerySet value for an exact lookup must be limited to one "
"result using slicing."
)
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[:2]))
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[1:]))
@skipUnless(connection.vendor == "mysql", "MySQL-specific workaround.")
def test_exact_booleanfield(self):
# MySQL ignores indexes with boolean fields unless they're compared
# directly to a boolean value.
product = Product.objects.create(name="Paper", qty_target=5000)
Stock.objects.create(product=product, short=False, qty_available=5100)
stock_1 = Stock.objects.create(product=product, short=True, qty_available=180)
qs = Stock.objects.filter(short=True)
self.assertSequenceEqual(qs, [stock_1])
self.assertIn(
"%s = True" % connection.ops.quote_name("short"),
str(qs.query),
)
@skipUnless(connection.vendor == "mysql", "MySQL-specific workaround.")
def test_exact_booleanfield_annotation(self):
# MySQL ignores indexes with boolean fields unless they're compared
# directly to a boolean value.
qs = Author.objects.annotate(
case=Case(
When(alias="a1", then=True),
default=False,
output_field=BooleanField(),
)
).filter(case=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertIn(" = True", str(qs.query))
qs = Author.objects.annotate(
wrapped=ExpressionWrapper(Q(alias="a1"), output_field=BooleanField()),
).filter(wrapped=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertIn(" = True", str(qs.query))
# EXISTS(...) shouldn't be compared to a boolean value.
qs = Author.objects.annotate(
exists=Exists(Author.objects.filter(alias="a1", pk=OuterRef("pk"))),
).filter(exists=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertNotIn(" = True", str(qs.query))
def test_custom_field_none_rhs(self):
"""
__exact=value is transformed to __isnull=True if Field.get_prep_value()
converts value to None.
"""
season = Season.objects.create(year=2012, nulled_text_field=None)
self.assertTrue(
Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True)
)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field=""))
def test_pattern_lookups_with_substr(self):
a = Author.objects.create(name="John Smith", alias="Johx")
b = Author.objects.create(name="Rhonda Simpson", alias="sonx")
tests = (
("startswith", [a]),
("istartswith", [a]),
("contains", [a, b]),
("icontains", [a, b]),
("endswith", [b]),
("iendswith", [b]),
)
for lookup, result in tests:
with self.subTest(lookup=lookup):
authors = Author.objects.filter(
**{"name__%s" % lookup: Substr("alias", 1, 3)}
)
self.assertCountEqual(authors, result)
def test_custom_lookup_none_rhs(self):
"""Lookup.can_use_none_as_rhs=True allows None as a lookup value."""
season = Season.objects.create(year=2012, nulled_text_field=None)
query = Season.objects.get_queryset().query
field = query.model._meta.get_field("nulled_text_field")
self.assertIsInstance(
query.build_lookup(["isnull_none_rhs"], field, None), IsNullWithNoneAsRHS
)
self.assertTrue(
Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True)
)
def test_exact_exists(self):
qs = Article.objects.filter(pk=OuterRef("pk"))
        seasons = Season.objects.annotate(
            pk_exists=Exists(qs),
        ).filter(
            pk_exists=Exists(qs),
        )
self.assertCountEqual(seasons, Season.objects.all())
def test_nested_outerref_lhs(self):
tag = Tag.objects.create(name=self.au1.alias)
tag.articles.add(self.a1)
qs = Tag.objects.annotate(
has_author_alias_match=Exists(
Article.objects.annotate(
author_exists=Exists(
Author.objects.filter(alias=OuterRef(OuterRef("name")))
),
).filter(author_exists=True)
),
)
self.assertEqual(qs.get(has_author_alias_match=True), tag)
def test_exact_query_rhs_with_selected_columns(self):
newest_author = Author.objects.create(name="Author 2")
authors_max_ids = (
Author.objects.filter(
name="Author 2",
)
.values(
"name",
)
.annotate(
max_id=Max("id"),
)
.values("max_id")
)
authors = Author.objects.filter(id=authors_max_ids[:1])
self.assertEqual(authors.get(), newest_author)
def test_isnull_non_boolean_value(self):
msg = "The QuerySet value for an isnull lookup must be True or False."
tests = [
Author.objects.filter(alias__isnull=1),
Article.objects.filter(author__isnull=1),
Season.objects.filter(games__isnull=1),
Freebie.objects.filter(stock__isnull=1),
]
for qs in tests:
with self.subTest(qs=qs):
with self.assertRaisesMessage(ValueError, msg):
qs.exists()
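    # Hedged sketch complementing the test above: with a proper boolean
    # value, __isnull filters as expected (au1 has a non-null alias in
    # this suite's fixtures).
    def test_isnull_boolean_value_sketch(self):
        self.assertIn(self.au1, Author.objects.filter(alias__isnull=False))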
def test_lookup_rhs(self):
product = Product.objects.create(name="GME", qty_target=5000)
stock_1 = Stock.objects.create(product=product, short=True, qty_available=180)
stock_2 = Stock.objects.create(product=product, short=False, qty_available=5100)
Stock.objects.create(product=product, short=False, qty_available=4000)
self.assertCountEqual(
Stock.objects.filter(short=Q(qty_available__lt=F("product__qty_target"))),
[stock_1, stock_2],
)
self.assertCountEqual(
Stock.objects.filter(
short=ExpressionWrapper(
Q(qty_available__lt=F("product__qty_target")),
output_field=BooleanField(),
)
),
[stock_1, stock_2],
)
class LookupQueryingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Season.objects.create(year=1942, gt=1942)
cls.s2 = Season.objects.create(year=1842, gt=1942, nulled_text_field="text")
cls.s3 = Season.objects.create(year=2042, gt=1942)
def test_annotate(self):
qs = Season.objects.annotate(equal=Exact(F("year"), 1942))
self.assertCountEqual(
qs.values_list("year", "equal"),
((1942, True), (1842, False), (2042, False)),
)
def test_alias(self):
qs = Season.objects.alias(greater=GreaterThan(F("year"), 1910))
self.assertCountEqual(qs.filter(greater=True), [self.s1, self.s3])
def test_annotate_value_greater_than_value(self):
qs = Season.objects.annotate(greater=GreaterThan(Value(40), Value(30)))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, True), (1842, True), (2042, True)),
)
def test_annotate_field_greater_than_field(self):
qs = Season.objects.annotate(greater=GreaterThan(F("year"), F("gt")))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, False), (1842, False), (2042, True)),
)
def test_annotate_field_greater_than_value(self):
qs = Season.objects.annotate(greater=GreaterThan(F("year"), Value(1930)))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, True), (1842, False), (2042, True)),
)
def test_annotate_field_greater_than_literal(self):
qs = Season.objects.annotate(greater=GreaterThan(F("year"), 1930))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, True), (1842, False), (2042, True)),
)
def test_annotate_literal_greater_than_field(self):
qs = Season.objects.annotate(greater=GreaterThan(1930, F("year")))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, False), (1842, True), (2042, False)),
)
def test_annotate_less_than_float(self):
qs = Season.objects.annotate(lesser=LessThan(F("year"), 1942.1))
self.assertCountEqual(
qs.values_list("year", "lesser"),
((1942, True), (1842, True), (2042, False)),
)
def test_annotate_greater_than_or_equal(self):
qs = Season.objects.annotate(greater=GreaterThanOrEqual(F("year"), 1942))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, True), (1842, False), (2042, True)),
)
def test_annotate_greater_than_or_equal_float(self):
qs = Season.objects.annotate(greater=GreaterThanOrEqual(F("year"), 1942.1))
self.assertCountEqual(
qs.values_list("year", "greater"),
((1942, False), (1842, False), (2042, True)),
)
def test_combined_lookups(self):
expression = Exact(F("year"), 1942) | GreaterThan(F("year"), 1942)
qs = Season.objects.annotate(gte=expression)
self.assertCountEqual(
qs.values_list("year", "gte"),
((1942, True), (1842, False), (2042, True)),
)
def test_lookup_in_filter(self):
qs = Season.objects.filter(GreaterThan(F("year"), 1910))
self.assertCountEqual(qs, [self.s1, self.s3])
def test_isnull_lookup_in_filter(self):
self.assertSequenceEqual(
Season.objects.filter(IsNull(F("nulled_text_field"), False)),
[self.s2],
)
self.assertCountEqual(
Season.objects.filter(IsNull(F("nulled_text_field"), True)),
[self.s1, self.s3],
)
def test_filter_lookup_lhs(self):
qs = Season.objects.annotate(before_20=LessThan(F("year"), 2000)).filter(
before_20=LessThan(F("year"), 1900),
)
self.assertCountEqual(qs, [self.s2, self.s3])
def test_filter_wrapped_lookup_lhs(self):
qs = (
Season.objects.annotate(
before_20=ExpressionWrapper(
Q(year__lt=2000),
output_field=BooleanField(),
)
)
.filter(before_20=LessThan(F("year"), 1900))
.values_list("year", flat=True)
)
self.assertCountEqual(qs, [1842, 2042])
def test_filter_exists_lhs(self):
qs = Season.objects.annotate(
before_20=Exists(
Season.objects.filter(pk=OuterRef("pk"), year__lt=2000),
)
).filter(before_20=LessThan(F("year"), 1900))
self.assertCountEqual(qs, [self.s2, self.s3])
def test_filter_subquery_lhs(self):
qs = Season.objects.annotate(
before_20=Subquery(
Season.objects.filter(pk=OuterRef("pk")).values(
lesser=LessThan(F("year"), 2000),
),
)
).filter(before_20=LessThan(F("year"), 1900))
self.assertCountEqual(qs, [self.s2, self.s3])
def test_combined_lookups_in_filter(self):
expression = Exact(F("year"), 1942) | GreaterThan(F("year"), 1942)
qs = Season.objects.filter(expression)
self.assertCountEqual(qs, [self.s1, self.s3])
def test_combined_annotated_lookups_in_filter(self):
expression = Exact(F("year"), 1942) | GreaterThan(F("year"), 1942)
qs = Season.objects.annotate(gte=expression).filter(gte=True)
self.assertCountEqual(qs, [self.s1, self.s3])
def test_combined_annotated_lookups_in_filter_false(self):
expression = Exact(F("year"), 1942) | GreaterThan(F("year"), 1942)
qs = Season.objects.annotate(gte=expression).filter(gte=False)
self.assertSequenceEqual(qs, [self.s2])
def test_lookup_in_order_by(self):
qs = Season.objects.order_by(LessThan(F("year"), 1910), F("year"))
self.assertSequenceEqual(qs, [self.s1, self.s3, self.s2])
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_aggregate_combined_lookup(self):
expression = Cast(GreaterThan(F("year"), 1900), models.IntegerField())
qs = Season.objects.aggregate(modern=models.Sum(expression))
self.assertEqual(qs["modern"], 2)
def test_conditional_expression(self):
qs = Season.objects.annotate(
century=Case(
When(
GreaterThan(F("year"), 1900) & LessThanOrEqual(F("year"), 2000),
then=Value("20th"),
),
default=Value("other"),
)
).values("year", "century")
self.assertCountEqual(
qs,
[
{"year": 1942, "century": "20th"},
{"year": 1842, "century": "other"},
{"year": 2042, "century": "other"},
],
)
|
717670750dfe1cdb6c3b408dee62b3910065cc40ac1ac98bdb307349ddb0e43b | import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import (
ALL_VAR,
IS_POPUP_VAR,
ORDER_VAR,
PAGE_VAR,
SEARCH_VAR,
TO_FIELD_VAR,
)
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.messages.storage.cookie import CookieStorage
from django.db import connection, models
from django.db.models import F, Field, IntegerField
from django.db.models.functions import Upper
from django.db.models.lookups import Contains, Exact
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.urls import reverse
from django.utils import formats
from .admin import (
BandAdmin,
ChildAdmin,
ChordsBandAdmin,
ConcertAdmin,
CustomPaginationAdmin,
CustomPaginator,
DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin,
DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin,
EmptyValueChildAdmin,
EventAdmin,
FilteredChildAdmin,
GroupAdmin,
InvitationAdmin,
NoListDisplayLinksParentAdmin,
ParentAdmin,
ParentAdminTwoSearchFields,
QuartetAdmin,
SwallowAdmin,
)
from .admin import site as custom_site
from .models import (
Band,
CharPK,
Child,
ChordsBand,
ChordsMusician,
Concert,
CustomIdUser,
Event,
Genre,
Group,
Invitation,
Membership,
Musician,
OrderedObject,
Parent,
Quartet,
Swallow,
SwallowOneToOne,
UnorderedObject,
)
def build_tbody_html(pk, href, extra_fields):
return (
"<tbody><tr>"
'<td class="action-checkbox">'
'<input type="checkbox" name="_selected_action" value="{}" '
'class="action-select"></td>'
'<th class="field-name"><a href="{}">name</a></th>'
"{}</tr></tbody>"
).format(pk, href, extra_fields)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", email="[email protected]", password="xxx"
)
def _create_superuser(self, username):
return User.objects.create_superuser(
username=username, email="[email protected]", password="xxx"
)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_repr(self):
m = ChildAdmin(Child, custom_site)
request = self.factory.get("/child/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(repr(cl), "<ChangeList: model=Child model_admin=ChildAdmin>")
def test_specified_ordering_by_f_expression(self):
class OrderedByFBandAdmin(admin.ModelAdmin):
list_display = ["name", "genres", "nr_of_members"]
ordering = (
F("nr_of_members").desc(nulls_last=True),
Upper(F("name")).asc(),
F("genres").asc(),
)
m = OrderedByFBandAdmin(Band, custom_site)
request = self.factory.get("/band/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.get_ordering_field_columns(), {3: "desc", 2: "asc"})
def test_specified_ordering_by_f_expression_without_asc_desc(self):
class OrderedByFBandAdmin(admin.ModelAdmin):
list_display = ["name", "genres", "nr_of_members"]
ordering = (F("nr_of_members"), Upper("name"), F("genres"))
m = OrderedByFBandAdmin(Band, custom_site)
request = self.factory.get("/band/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.get_ordering_field_columns(), {3: "asc", 2: "asc"})
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get("/child/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"parent": {}})
def test_select_related_preserved_when_multi_valued_in_search_fields(self):
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
# select_related is preserved.
self.assertEqual(cl.queryset.query.select_related, {"child": {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"player": {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertIs(cl.queryset.query.select_related, False)
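    # Hedged sketch (not part of the original suite): list_select_related
    # may also be True, in which case the changelist calls select_related()
    # with no arguments and query.select_related becomes True.
    def test_select_related_as_true_sketch(self):
        ia = InvitationAdmin(Invitation, custom_site)
        ia.list_select_related = True
        request = self.factory.get("/invitation/")
        request.user = self.superuser
        cl = ia.get_changelist_instance(request)
        self.assertIs(cl.queryset.query.select_related, True)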
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ("band", "player")
def get_list_select_related(self, request):
return ("band", "player")
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"player": {}, "band": {}})
def test_many_search_terms(self):
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel " * 80})
request.user = self.superuser
cl = m.get_changelist_instance(request)
with CaptureQueriesContext(connection) as context:
object_count = cl.queryset.count()
self.assertEqual(object_count, 1)
self.assertEqual(context.captured_queries[0]["sql"].count("JOIN"), 1)
def test_related_field_multiple_search_terms(self):
"""
Searches over multi-valued relationships return rows from related
models only when all searched fields match that row.
"""
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle", age=18)
Child.objects.create(parent=parent, name="Daniel", age=19)
m = ParentAdminTwoSearchFields(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "danielle 19"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 0)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel 19"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name="name", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child.id, link, '<td class="field-parent nowrap">-</td>'
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Empty value display can be set on AdminSite.
"""
new_child = Child.objects.create(name="name", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = "???"
m = ChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child.id, link, '<td class="field-parent nowrap">???</td>'
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
Empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name="name", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
m = EmptyValueChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child.id,
link,
'<td class="field-age_display">&dagger;</td>'
'<td class="field-age">-empty-</td>',
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
def test_result_list_html(self):
"""
        Inclusion tag result_list generates a table with the default
        ModelAdmin settings.
"""
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
        Also a regression test for #13599: hidden fields, when list_editable
        is enabled, are rendered in a div outside the table.
"""
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ["id", "name", "parent"]
m.list_display_links = ["id"]
m.list_editable = ["name"]
cl = m.get_changelist_instance(request)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
"</div>"
) % new_child.id
self.assertInHTML(
hiddenfields_div, table_output, msg_prefix="Failed to find hidden fields"
)
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name">'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name="parent")
for i in range(1, 201):
Child.objects.create(name="name %s" % i, parent=new_parent)
request = self.factory.get("/child/", data={"p": -1}) # Anything outside range
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ["id", "name", "parent"]
m.list_display_links = ["id"]
m.list_editable = ["name"]
with self.assertRaises(IncorrectLookupParameters):
m.get_changelist_instance(request)
def test_custom_paginator(self):
new_parent = Parent.objects.create(name="parent")
for i in range(1, 201):
Child.objects.create(name="name %s" % i, parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = CustomPaginationAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_no_duplicates_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name="Blues")
band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get("/band/", data={"genres": blues.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name="Vox")
band = Group.objects.create(name="The Hype")
Membership.objects.create(group=band, music=lead, role="lead voice")
Membership.objects.create(group=band, music=lead, role="bass player")
m = GroupAdmin(Group, custom_site)
request = self.factory.get("/group/", data={"members": lead.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, results shouldn't appear more than once.
"""
lead = Musician.objects.create(name="Vox")
band = Group.objects.create(name="The Hype")
Concert.objects.create(name="Woodstock", group=band)
Membership.objects.create(group=band, music=lead, role="lead voice")
Membership.objects.create(group=band, music=lead, role="bass player")
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get("/concert/", data={"group__members": lead.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name="John")
four = Quartet.objects.create(name="The Beatles")
Membership.objects.create(group=four, music=lead, role="lead voice")
Membership.objects.create(group=four, music=lead, role="guitar player")
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get("/quartet/", data={"members": lead.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name="Player A")
three = ChordsBand.objects.create(name="The Chords Trio")
Invitation.objects.create(band=three, player=lead, instrument="guitar")
Invitation.objects.create(band=three, player=lead, instrument="bass")
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get("/chordsband/", data={"members": lead.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: If a field listed in list_filter is a
non-unique related object, results shouldn't appear more than once.
"""
parent = Parent.objects.create(name="Mary")
# Two children with the same name
Child.objects.create(parent=parent, name="Daniel")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={"child__name": "Daniel"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
# Exists() is applied.
self.assertEqual(cl.queryset.count(), 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_changelist_search_form_validation(self):
m = ConcertAdmin(Concert, custom_site)
tests = [
({SEARCH_VAR: "\x00"}, "Null characters are not allowed."),
({SEARCH_VAR: "some\x00thing"}, "Null characters are not allowed."),
]
for case, error in tests:
with self.subTest(case=case):
request = self.factory.get("/concert/", case)
request.user = self.superuser
request._messages = CookieStorage(request)
m.get_changelist_instance(request)
messages = [m.message for m in request._messages]
self.assertEqual(1, len(messages))
self.assertEqual(error, messages[0])
def test_no_duplicates_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, Exists() must be applied.
"""
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
# Exists() is applied.
self.assertEqual(cl.queryset.count(), 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_no_duplicates_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, Exists() must be applied and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name="Vox")
band = Group.objects.create(name="The Hype")
Concert.objects.create(name="Woodstock", group=band)
Membership.objects.create(group=band, music=lead, role="lead voice")
Membership.objects.create(group=band, music=lead, role="bass player")
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get("/concert/", data={SEARCH_VAR: "vox"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
def test_multiple_search_fields(self):
"""
All rows containing each of the searched words are returned, where each
word must be in one of search_fields.
"""
band_duo = Group.objects.create(name="Duo")
band_hype = Group.objects.create(name="The Hype")
mary = Musician.objects.create(name="Mary Halvorson")
jonathan = Musician.objects.create(name="Jonathan Finlayson")
band_duo.members.set([mary, jonathan])
Concert.objects.create(name="Tiny desk concert", group=band_duo)
Concert.objects.create(name="Woodstock concert", group=band_hype)
# FK lookup.
concert_model_admin = ConcertAdmin(Concert, custom_site)
concert_model_admin.search_fields = ["group__name", "name"]
# Reverse FK lookup.
group_model_admin = GroupAdmin(Group, custom_site)
group_model_admin.search_fields = ["name", "concert__name", "members__name"]
for search_string, result_count in (
("Duo Concert", 1),
("Tiny Desk Concert", 1),
("Concert", 2),
("Other Concert", 0),
("Duo Woodstock", 0),
):
with self.subTest(search_string=search_string):
# FK lookup.
request = self.factory.get(
"/concert/", data={SEARCH_VAR: search_string}
)
request.user = self.superuser
concert_changelist = concert_model_admin.get_changelist_instance(
request
)
self.assertEqual(concert_changelist.queryset.count(), result_count)
# Reverse FK lookup.
request = self.factory.get("/group/", data={SEARCH_VAR: search_string})
request.user = self.superuser
group_changelist = group_model_admin.get_changelist_instance(request)
self.assertEqual(group_changelist.queryset.count(), result_count)
# Many-to-many lookup.
for search_string, result_count in (
("Finlayson Duo Tiny", 1),
("Finlayson", 1),
("Finlayson Hype", 0),
("Jonathan Finlayson Duo", 1),
("Mary Jonathan Duo", 0),
("Oscar Finlayson Duo", 0),
):
with self.subTest(search_string=search_string):
request = self.factory.get("/group/", data={SEARCH_VAR: search_string})
request.user = self.superuser
group_changelist = group_model_admin.get_changelist_instance(request)
self.assertEqual(group_changelist.queryset.count(), result_count)
def test_pk_in_search_fields(self):
band = Group.objects.create(name="The Hype")
Concert.objects.create(name="Woodstock", group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["group__pk"]
request = self.factory.get("/concert/", data={SEARCH_VAR: band.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
request = self.factory.get("/concert/", data={SEARCH_VAR: band.pk + 5})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 0)
def test_builtin_lookup_in_search_fields(self):
band = Group.objects.create(name="The Hype")
concert = Concert.objects.create(name="Woodstock", group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["name__iexact"]
request = self.factory.get("/", data={SEARCH_VAR: "woodstock"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [concert])
request = self.factory.get("/", data={SEARCH_VAR: "wood"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
def test_custom_lookup_in_search_fields(self):
band = Group.objects.create(name="The Hype")
concert = Concert.objects.create(name="Woodstock", group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["group__name__cc"]
with register_lookup(Field, Contains, lookup_name="cc"):
request = self.factory.get("/", data={SEARCH_VAR: "Hype"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [concert])
request = self.factory.get("/", data={SEARCH_VAR: "Woodstock"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
def test_spanning_relations_with_custom_lookup_in_search_fields(self):
hype = Group.objects.create(name="The Hype")
concert = Concert.objects.create(name="Woodstock", group=hype)
vox = Musician.objects.create(name="Vox", age=20)
Membership.objects.create(music=vox, group=hype)
# Register a custom lookup on IntegerField to ensure that field
# traversing logic in ModelAdmin.get_search_results() works.
with register_lookup(IntegerField, Exact, lookup_name="exactly"):
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["group__members__age__exactly"]
request = self.factory.get("/", data={SEARCH_VAR: "20"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [concert])
request = self.factory.get("/", data={SEARCH_VAR: "21"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
def test_custom_lookup_with_pk_shortcut(self):
self.assertEqual(CharPK._meta.pk.name, "char_pk") # Not equal to 'pk'.
abc = CharPK.objects.create(char_pk="abc")
abcd = CharPK.objects.create(char_pk="abcd")
m = admin.ModelAdmin(CharPK, custom_site)
m.search_fields = ["pk__exact"]
request = self.factory.get("/", data={SEARCH_VAR: "abc"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [abc])
request = self.factory.get("/", data={SEARCH_VAR: "abcd"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [abcd])
def test_no_exists_for_m2m_in_list_filter_without_params(self):
"""
If a ManyToManyField is in list_filter but isn't in any lookup params,
the changelist's query shouldn't have Exists().
"""
m = BandAdmin(Band, custom_site)
for lookup_params in ({}, {"name": "test"}):
request = self.factory.get("/band/", lookup_params)
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertNotIn(" EXISTS", str(cl.queryset.query))
# A ManyToManyField in params does have Exists() applied.
request = self.factory.get("/band/", {"genres": "0"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertIn(" EXISTS", str(cl.queryset.query))
def test_pagination(self):
"""
Regression tests for #12893: pagination in the admin changelist didn't
use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name="anything")
for i in range(1, 31):
Child.objects.create(name="name %s" % i, parent=parent)
Child.objects.create(name="filtered %s" % i, parent=parent)
request = self.factory.get("/child/")
request.user = self.superuser
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
self.client.force_login(self.superuser)
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse("admin:admin_changelist_event_changelist"))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, str(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name="parent")
for i in range(10):
Child.objects.create(name="child %s" % i, parent=parent)
user_noparents = self._create_superuser("noparents")
user_parents = self._create_superuser("parents")
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request("/child/", user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, "Parent object")
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ["name", "age"])
self.assertEqual(list_display_links, ["name"])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", user_parents)
response = m.changelist_view(request)
self.assertContains(response, "Parent object")
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ("parent", "name", "age"))
self.assertEqual(list_display_links, ["parent"])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request("/child/", user_noparents)
response = m.changelist_view(request)
self.assertContains(response, "Parent object")
def test_show_all(self):
parent = Parent.objects.create(name="anything")
for i in range(1, 31):
Child.objects.create(name="name %s" % i, parent=parent)
Child.objects.create(name="filtered %s" % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get("/child/", data={ALL_VAR: ""})
request.user = self.superuser
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 200
# 200 is the max we'll pass to ChangeList
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 30
# 30 is the max we'll pass to ChangeList for this test
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name="parent")
for i in range(1, 10):
Child.objects.create(id=i, name="child %s" % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser("superuser")
request = self._mocked_authenticated_request("/child/", superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse("admin:admin_changelist_child_change", args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ("parent", "name", "age"))
self.assertEqual(list_display_links, ["age"])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name="parent")
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser("superuser")
request = self._mocked_authenticated_request("/parent/", superuser)
response = m.changelist_view(request)
link = reverse("admin:admin_changelist_parent_change", args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_clear_all_filters_link(self):
self.client.force_login(self.superuser)
url = reverse("admin:auth_user_changelist")
response = self.client.get(url)
self.assertNotContains(response, "✖ Clear all filters")
link = '<a href="%s">✖ Clear all filters</a>'
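# The link's href drops the active filter lookups but preserves other
# parameters the changelist depends on (search terms, popup state, and
# lookups that don't belong to a registered filter).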
for data, href in (
({"is_staff__exact": "0"}, "?"),
(
{"is_staff__exact": "0", "username__startswith": "test"},
"?username__startswith=test",
),
(
{"is_staff__exact": "0", SEARCH_VAR: "test"},
"?%s=test" % SEARCH_VAR,
),
(
{"is_staff__exact": "0", IS_POPUP_VAR: "id"},
"?%s=id" % IS_POPUP_VAR,
),
):
with self.subTest(data=data):
response = self.client.get(url, data=data)
self.assertContains(response, link % href)
def test_clear_all_filters_link_callable_filter(self):
self.client.force_login(self.superuser)
url = reverse("admin:admin_changelist_band_changelist")
response = self.client.get(url)
self.assertNotContains(response, "✖ Clear all filters")
link = '<a href="%s">✖ Clear all filters</a>'
for data, href in (
({"nr_of_members_partition": "5"}, "?"),
(
{"nr_of_members_partition": "more", "name__startswith": "test"},
"?name__startswith=test",
),
(
{"nr_of_members_partition": "5", IS_POPUP_VAR: "id"},
"?%s=id" % IS_POPUP_VAR,
),
):
with self.subTest(data=data):
response = self.client.get(url, data=data)
self.assertContains(response, link % href)
def test_no_clear_all_filters_link(self):
self.client.force_login(self.superuser)
url = reverse("admin:auth_user_changelist")
link = ">✖ Clear all filters</a>"
for data in (
{SEARCH_VAR: "test"},
{ORDER_VAR: "-1"},
{TO_FIELD_VAR: "id"},
{PAGE_VAR: "1"},
{IS_POPUP_VAR: "1"},
{"username__startswith": "test"},
):
with self.subTest(data=data):
response = self.client.get(url, data=data)
self.assertNotContains(response, link)
def test_tuple_list_display(self):
swallow = Swallow.objects.create(origin="Africa", load="12.34", speed="22.2")
swallow2 = Swallow.objects.create(origin="Africa", load="12.34", speed="22.2")
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser("superuser")
request = self._mocked_authenticated_request("/swallow/", superuser)
response = model_admin.changelist_view(request)
# Ensure the changelist renders without errors and displays the fields.
self.assertContains(response, str(swallow.origin))
self.assertContains(response, str(swallow.load))
self.assertContains(response, str(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(
response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o
)
def test_multiuser_edit(self):
"""
Simultaneous edits of list_editable fields on the changelist by
different users must not result in one user's edits creating a new
object instead of modifying the correct existing object (#11313).
"""
# To replicate this issue, simulate the following steps:
# 1. User1 opens an admin changelist with list_editable fields.
# 2. User2 edits object "Foo" such that it moves to another page in
# the pagination order and saves.
# 3. User1 edits object "Foo" and saves.
# 4. The edit made by User1 does not get applied to object "Foo" but
# instead is used to create a new object (bug).
# For this test, order the changelist by the 'speed' attribute and
# display 3 objects per page (SwallowAdmin.list_per_page = 3).
# Set up the test to reflect the DB state after step 2 where User2 has
# edited the first swallow object's speed from '4' to '1'.
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
c = Swallow.objects.create(origin="Swallow C", load=5, speed=5)
d = Swallow.objects.create(origin="Swallow D", load=9, speed=9)
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
# Send the POST from User1 for step 3. It's still using the changelist
# ordering from before User2's edits in step 2.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(d.pk),
"form-1-uuid": str(c.pk),
"form-2-uuid": str(a.pk),
"form-0-load": "9.0",
"form-0-speed": "9.0",
"form-1-load": "5.0",
"form-1-speed": "5.0",
"form-2-load": "5.0",
"form-2-speed": "4.0",
"_save": "Save",
}
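# Before User2's edit, a.speed was 4, so User1's stale first page
# (ordered by '-speed', 3 per page) contained d, c, and a; form-2
# therefore targets object a.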
response = self.client.post(
changelist_url, data, follow=True, extra={"o": "-2"}
)
# The object User1 edited in step 3 is displayed on the changelist and
# has the correct edits applied.
self.assertContains(response, "1 swallow was changed successfully.")
self.assertContains(response, a.origin)
a.refresh_from_db()
self.assertEqual(a.load, float(data["form-2-load"]))
self.assertEqual(a.speed, float(data["form-2-speed"]))
b.refresh_from_db()
self.assertEqual(b.load, 2)
self.assertEqual(b.speed, 2)
c.refresh_from_db()
self.assertEqual(c.load, float(data["form-1-load"]))
self.assertEqual(c.speed, float(data["form-1-speed"]))
d.refresh_from_db()
self.assertEqual(d.load, float(data["form-0-load"]))
self.assertEqual(d.speed, float(data["form-0-speed"]))
# No new swallows were created.
self.assertEqual(len(Swallow.objects.all()), 4)
def test_get_edited_object_ids(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
c = Swallow.objects.create(origin="Swallow C", load=5, speed=5)
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-1-uuid": str(b.pk),
"form-2-uuid": str(c.pk),
"form-0-load": "9.0",
"form-0-speed": "9.0",
"form-1-load": "5.0",
"form-1-speed": "5.0",
"form-2-load": "5.0",
"form-2-speed": "4.0",
"_save": "Save",
}
request = self.factory.post(changelist_url, data=data)
pks = m._get_edited_object_pks(request, prefix="form")
self.assertEqual(sorted(pks), sorted([str(a.pk), str(b.pk), str(c.pk)]))
def test_get_list_editable_queryset(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-0-load": "10",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form")
self.assertEqual(queryset.count(), 1)
data["form-0-uuid"] = "INVALD_PRIMARY_KEY"
# The unfiltered queryset is returned if there's invalid data.
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form")
self.assertEqual(queryset.count(), 2)
def test_get_list_editable_queryset_with_regex_chars_in_prefix(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form$-TOTAL_FORMS": "2",
"form$-INITIAL_FORMS": "2",
"form$-MIN_NUM_FORMS": "0",
"form$-MAX_NUM_FORMS": "1000",
"form$-0-uuid": str(a.pk),
"form$-0-load": "10",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form$")
self.assertEqual(queryset.count(), 1)
def test_changelist_view_list_editable_changed_objects_uses_filter(self):
"""list_editable edits use a filtered queryset to limit memory usage."""
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-0-load": "10",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
with CaptureQueriesContext(connection) as context:
response = self.client.post(changelist_url, data=data)
self.assertEqual(response.status_code, 200)
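# Index 4 is assumed to be the data query issued after the fixed
# sequence of session/auth lookups; update it if that sequence changes.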
self.assertIn("WHERE", context.captured_queries[4]["sql"])
self.assertIn("IN", context.captured_queries[4]["sql"])
# Check only the first few characters since the UUID may have dashes.
self.assertIn(str(a.pk)[:8], context.captured_queries[4]["sql"])
def test_deterministic_order_for_unordered_model(self):
"""
The primary key is used in the ordering of the changelist's results to
guarantee a deterministic order, even when the model doesn't have any
default ordering defined (#17198).
"""
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(1, 6):
request = self._mocked_authenticated_request(
"/unorderedobject/?p=%s" % page, superuser
)
response = model_admin.changelist_view(request)
for result in response.context_data["cl"].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ["bool"]
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ["bool", "-pk"]
check_results_order()
UnorderedObjectAdmin.ordering = ["bool", "pk"]
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ["-id", "bool"]
check_results_order()
UnorderedObjectAdmin.ordering = ["id", "bool"]
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
The primary key is used in the ordering of the changelist's results to
guarantee a deterministic order, even when the model has a manager that
defines a default ordering (#17198).
"""
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(1, 6):
request = self._mocked_authenticated_request(
"/orderedobject/?p=%s" % page, superuser
)
response = model_admin.changelist_view(request)
for result in response.context_data["cl"].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering
# (i.e. 'number').
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ["bool"]
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ["bool", "-pk"]
check_results_order()
OrderedObjectAdmin.ordering = ["bool", "pk"]
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ["-id", "bool"]
check_results_order()
OrderedObjectAdmin.ordering = ["id", "bool"]
check_results_order(ascending=True)
@isolate_apps("admin_changelist")
def test_total_ordering_optimization(self):
class Related(models.Model):
unique_field = models.BooleanField(unique=True)
class Meta:
ordering = ("unique_field",)
class Model(models.Model):
unique_field = models.BooleanField(unique=True)
unique_nullable_field = models.BooleanField(unique=True, null=True)
related = models.ForeignKey(Related, models.CASCADE)
other_related = models.ForeignKey(Related, models.CASCADE)
related_unique = models.OneToOneField(Related, models.CASCADE)
field = models.BooleanField()
other_field = models.BooleanField()
null_field = models.BooleanField(null=True)
class Meta:
unique_together = {
("field", "other_field"),
("field", "null_field"),
("related", "other_related_id"),
}
class ModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return Model.objects.none()
request = self._mocked_authenticated_request("/", self.superuser)
site = admin.AdminSite(name="admin")
model_admin = ModelAdmin(Model, site)
change_list = model_admin.get_changelist_instance(request)
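# Each pair maps a requested ordering to the deterministic ordering:
# '-pk' is appended unless the requested fields already guarantee a
# total ordering (e.g. a non-nullable unique field or a full
# unique_together).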
tests = (
([], ["-pk"]),
# Unique non-nullable field.
(["unique_field"], ["unique_field"]),
(["-unique_field"], ["-unique_field"]),
# Unique nullable field.
(["unique_nullable_field"], ["unique_nullable_field", "-pk"]),
# Field.
(["field"], ["field", "-pk"]),
# Related field introspection is not implemented.
(["related__unique_field"], ["related__unique_field", "-pk"]),
# Related attname unique.
(["related_unique_id"], ["related_unique_id"]),
# Related ordering introspection is not implemented.
(["related_unique"], ["related_unique", "-pk"]),
# Composite unique.
(["field", "-other_field"], ["field", "-other_field"]),
# Composite unique nullable.
(["-field", "null_field"], ["-field", "null_field", "-pk"]),
# Composite unique and nullable.
(
["-field", "null_field", "other_field"],
["-field", "null_field", "other_field"],
),
# Composite unique attnames.
(["related_id", "-other_related_id"], ["related_id", "-other_related_id"]),
# Composite unique names.
(["related", "-other_related_id"], ["related", "-other_related_id", "-pk"]),
)
# F() objects composite unique.
total_ordering = [F("field"), F("other_field").desc(nulls_last=True)]
# F() objects composite unique nullable.
non_total_ordering = [F("field"), F("null_field").desc(nulls_last=True)]
tests += (
(total_ordering, total_ordering),
(non_total_ordering, non_total_ordering + ["-pk"]),
)
for ordering, expected in tests:
with self.subTest(ordering=ordering):
self.assertEqual(
change_list._get_deterministic_ordering(ordering), expected
)
@isolate_apps("admin_changelist")
def test_total_ordering_optimization_meta_constraints(self):
class Related(models.Model):
unique_field = models.BooleanField(unique=True)
class Meta:
ordering = ("unique_field",)
class Model(models.Model):
field_1 = models.BooleanField()
field_2 = models.BooleanField()
field_3 = models.BooleanField()
field_4 = models.BooleanField()
field_5 = models.BooleanField()
field_6 = models.BooleanField()
nullable_1 = models.BooleanField(null=True)
nullable_2 = models.BooleanField(null=True)
related_1 = models.ForeignKey(Related, models.CASCADE)
related_2 = models.ForeignKey(Related, models.CASCADE)
related_3 = models.ForeignKey(Related, models.CASCADE)
related_4 = models.ForeignKey(Related, models.CASCADE)
class Meta:
constraints = [
*[
models.UniqueConstraint(fields=fields, name="".join(fields))
for fields in (
["field_1"],
["nullable_1"],
["related_1"],
["related_2_id"],
["field_2", "field_3"],
["field_2", "nullable_2"],
["field_2", "related_3"],
["field_3", "related_4_id"],
)
],
models.CheckConstraint(check=models.Q(id__gt=0), name="foo"),
models.UniqueConstraint(
fields=["field_5"],
condition=models.Q(id__gt=10),
name="total_ordering_1",
),
models.UniqueConstraint(
fields=["field_6"],
condition=models.Q(),
name="total_ordering",
),
]
class ModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return Model.objects.none()
request = self._mocked_authenticated_request("/", self.superuser)
site = admin.AdminSite(name="admin")
model_admin = ModelAdmin(Model, site)
change_list = model_admin.get_changelist_instance(request)
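# Partial (conditional) UniqueConstraints can't guarantee a total
# ordering and are ignored; a constraint with an empty Q() condition is
# treated as unconditional.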
tests = (
# Unique non-nullable field.
(["field_1"], ["field_1"]),
# Unique nullable field.
(["nullable_1"], ["nullable_1", "-pk"]),
# Related attname unique.
(["related_1_id"], ["related_1_id"]),
(["related_2_id"], ["related_2_id"]),
# Related ordering introspection is not implemented.
(["related_1"], ["related_1", "-pk"]),
# Composite unique.
(["-field_2", "field_3"], ["-field_2", "field_3"]),
# Composite unique nullable.
(["field_2", "-nullable_2"], ["field_2", "-nullable_2", "-pk"]),
# Composite unique and nullable.
(
["field_2", "-nullable_2", "field_3"],
["field_2", "-nullable_2", "field_3"],
),
# Composite field and related field name.
(["field_2", "-related_3"], ["field_2", "-related_3", "-pk"]),
(["field_3", "related_4"], ["field_3", "related_4", "-pk"]),
# Composite field and related field attname.
(["field_2", "related_3_id"], ["field_2", "related_3_id"]),
(["field_3", "-related_4_id"], ["field_3", "-related_4_id"]),
# Partial unique constraint is ignored.
(["field_5"], ["field_5", "-pk"]),
# Unique constraint with an empty condition.
(["field_6"], ["field_6"]),
)
for ordering, expected in tests:
with self.subTest(ordering=ordering):
self.assertEqual(
change_list._get_deterministic_ordering(ordering), expected
)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name="parent")
for i in range(10):
Child.objects.create(name="child %s" % i, parent=parent)
user_noparents = self._create_superuser("noparents")
user_parents = self._create_superuser("parents")
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data["cl"].list_filter, ["name", "age"])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", user_parents)
response = m.changelist_view(request)
self.assertEqual(
response.context_data["cl"].list_filter, ("parent", "name", "age")
)
def test_dynamic_search_fields(self):
child = self._create_superuser("child")
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", child)
response = m.changelist_view(request)
self.assertEqual(response.context_data["cl"].search_fields, ("name", "age"))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views is correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get("/group/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.list_per_page = 10
ELLIPSIS = cl.paginator.ELLIPSIS
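# Cases are (current page, total pages, expected page_range); the
# paginator keeps the edges and the pages around the current one and
# elides the rest with ELLIPSIS.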
for number, pages, expected in [
(1, 1, []),
(1, 2, [1, 2]),
(6, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, 12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
(6, 13, [1, 2, 3, 4, 5, 6, 7, 8, 9, ELLIPSIS, 12, 13]),
(7, 12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
(7, 13, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
(7, 14, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ELLIPSIS, 13, 14]),
(8, 13, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
(8, 14, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
(8, 15, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, ELLIPSIS, 14, 15]),
]:
with self.subTest(number=number, pages=pages):
# assuming exactly `pages * cl.list_per_page` objects
Group.objects.all().delete()
for i in range(pages * cl.list_per_page):
Group.objects.create(name="test band")
# setting page number and calculating page range
cl.page_num = number
cl.get_results(request)
self.assertEqual(list(pagination(cl)["page_range"]), expected)
def test_object_tools_displayed_no_add_permission(self):
"""
When ModelAdmin.has_add_permission() returns False, the object-tools
block is still shown.
"""
superuser = self._create_superuser("superuser")
m = EventAdmin(Event, custom_site)
request = self._mocked_authenticated_request("/event/", superuser)
self.assertFalse(m.has_add_permission(request))
response = m.changelist_view(request)
self.assertIn('<ul class="object-tools">', response.rendered_content)
# The "Add" button inside the object-tools shouldn't appear.
self.assertNotIn("Add ", response.rendered_content)
def test_search_help_text(self):
superuser = self._create_superuser("superuser")
m = BandAdmin(Band, custom_site)
# search_fields without search_help_text.
m.search_fields = ["name"]
request = self._mocked_authenticated_request("/band/", superuser)
response = m.changelist_view(request)
self.assertIsNone(response.context_data["cl"].search_help_text)
self.assertNotContains(response, '<div class="help" id="searchbar_helptext">')
# search_fields with search_help_text.
m.search_help_text = "Search help text"
request = self._mocked_authenticated_request("/band/", superuser)
response = m.changelist_view(request)
self.assertEqual(
response.context_data["cl"].search_help_text, "Search help text"
)
self.assertContains(
response, '<div class="help" id="searchbar_helptext">Search help text</div>'
)
self.assertContains(
response,
'<input type="text" size="40" name="q" value="" id="searchbar" '
'aria-describedby="searchbar_helptext">',
)
class GetAdminLogTests(TestCase):
def test_custom_user_pk_not_named_id(self):
"""
{% get_admin_log %} works if the user model's primary key isn't named
'id'.
"""
context = Context({"user": CustomIdUser()})
template = Template(
"{% load log %}{% get_admin_log 10 as admin_log for_user user %}"
)
# The tag renders nothing; it only loads the log entries into the context.
self.assertEqual(template.render(context), "")
def test_no_user(self):
"""{% get_admin_log %} works without specifying a user."""
user = User(username="jondoe", password="secret", email="[email protected]")
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
"{% load log %}"
"{% get_admin_log 100 as admin_log %}"
"{% for entry in admin_log %}"
"{{ entry|safe }}"
"{% endfor %}"
)
self.assertEqual(t.render(Context({})), "Added “<User: jondoe>”.")
def test_missing_args(self):
msg = "'get_admin_log' statements require two arguments"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template("{% load log %}{% get_admin_log 10 as %}")
def test_non_integer_limit(self):
msg = "First argument to 'get_admin_log' must be an integer"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template(
'{% load log %}{% get_admin_log "10" as admin_log for_user user %}'
)
def test_without_as(self):
msg = "Second argument to 'get_admin_log' must be 'as'"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template("{% load log %}{% get_admin_log 10 ad admin_log for_user user %}")
def test_without_for_user(self):
msg = "Fourth argument to 'get_admin_log' must be 'for_user'"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template("{% load log %}{% get_admin_log 10 as admin_log foruser user %}")
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_changelist"] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(username="super", password="secret", email=None)
def test_add_row_selection(self):
"""
The status line for selected rows gets updated correctly (#22038).
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))
form_id = "#changelist-form"
# Test the number of rows in the changelist.
rows = self.selenium.find_elements(
By.CSS_SELECTOR, "%s #result_list tbody tr" % form_id
)
self.assertEqual(len(rows), 1)
row = rows[0]
selection_indicator = self.selenium.find_element(
By.CSS_SELECTOR, "%s .action-counter" % form_id
)
all_selector = self.selenium.find_element(By.ID, "action-toggle")
row_selector = self.selenium.find_element(
By.CSS_SELECTOR,
"%s #result_list tbody tr:first-child .action-select" % form_id,
)
# Test current selection
self.assertEqual(selection_indicator.text, "0 of 1 selected")
self.assertIs(all_selector.get_property("checked"), False)
self.assertEqual(row.get_attribute("class"), "")
# Select a row and check again
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
self.assertIs(all_selector.get_property("checked"), True)
self.assertEqual(row.get_attribute("class"), "selected")
# Deselect a row and check again
row_selector.click()
self.assertEqual(selection_indicator.text, "0 of 1 selected")
self.assertIs(all_selector.get_property("checked"), False)
self.assertEqual(row.get_attribute("class"), "")
def test_modifier_allows_multiple_selection(self):
"""
Selecting a row and then selecting another row while holding the
shift key should select all rows in between.
"""
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
Parent.objects.bulk_create([Parent(name="parent%d" % i) for i in range(5)])
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
)
checkboxes = self.selenium.find_elements(
By.CSS_SELECTOR, "tr input.action-select"
)
self.assertEqual(len(checkboxes), 5)
for c in checkboxes:
self.assertIs(c.get_property("checked"), False)
# Check first row. Hold-shift and check next-to-last row.
checkboxes[0].click()
ActionChains(self.selenium).key_down(Keys.SHIFT).click(checkboxes[-2]).key_up(
Keys.SHIFT
).perform()
for c in checkboxes[:-2]:
self.assertIs(c.get_property("checked"), True)
self.assertIs(checkboxes[-1].get_property("checked"), False)
def test_select_all_across_pages(self):
from selenium.webdriver.common.by import By
Parent.objects.bulk_create([Parent(name="parent%d" % i) for i in range(101)])
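# 101 objects with the default 100 per page leaves one object on a
# second page, so "select all across pages" can be exercised.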
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
)
selection_indicator = self.selenium.find_element(
By.CSS_SELECTOR, ".action-counter"
)
select_all_indicator = self.selenium.find_element(
By.CSS_SELECTOR, ".actions .all"
)
question = self.selenium.find_element(By.CSS_SELECTOR, ".actions > .question")
clear = self.selenium.find_element(By.CSS_SELECTOR, ".actions > .clear")
select_all = self.selenium.find_element(By.ID, "action-toggle")
select_across = self.selenium.find_elements(By.NAME, "select_across")
self.assertIs(question.is_displayed(), False)
self.assertIs(clear.is_displayed(), False)
self.assertIs(select_all.get_property("checked"), False)
for hidden_input in select_across:
self.assertEqual(hidden_input.get_property("value"), "0")
self.assertIs(selection_indicator.is_displayed(), True)
self.assertEqual(selection_indicator.text, "0 of 100 selected")
self.assertIs(select_all_indicator.is_displayed(), False)
select_all.click()
self.assertIs(question.is_displayed(), True)
self.assertIs(clear.is_displayed(), False)
self.assertIs(select_all.get_property("checked"), True)
for hidden_input in select_across:
self.assertEqual(hidden_input.get_property("value"), "0")
self.assertIs(selection_indicator.is_displayed(), True)
self.assertEqual(selection_indicator.text, "100 of 100 selected")
self.assertIs(select_all_indicator.is_displayed(), False)
question.click()
self.assertIs(question.is_displayed(), False)
self.assertIs(clear.is_displayed(), True)
self.assertIs(select_all.get_property("checked"), True)
for hidden_input in select_across:
self.assertEqual(hidden_input.get_property("value"), "1")
self.assertIs(selection_indicator.is_displayed(), False)
self.assertIs(select_all_indicator.is_displayed(), True)
clear.click()
self.assertIs(question.is_displayed(), False)
self.assertIs(clear.is_displayed(), False)
self.assertIs(select_all.get_property("checked"), False)
for hidden_input in select_across:
self.assertEqual(hidden_input.get_property("value"), "0")
self.assertIs(selection_indicator.is_displayed(), True)
self.assertEqual(selection_indicator.text, "0 of 100 selected")
self.assertIs(select_all_indicator.is_displayed(), False)
def test_actions_warn_on_pending_edits(self):
from selenium.webdriver.common.by import By
Parent.objects.create(name="foo")
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
)
name_input = self.selenium.find_element(By.ID, "id_form-0-name")
name_input.clear()
name_input.send_keys("bar")
self.selenium.find_element(By.ID, "action-toggle").click()
self.selenium.find_element(By.NAME, "index").click() # Go
alert = self.selenium.switch_to.alert
try:
self.assertEqual(
alert.text,
"You have unsaved changes on individual editable fields. If you "
"run an action, your unsaved changes will be lost.",
)
finally:
alert.dismiss()
def test_save_with_changes_warns_on_pending_action(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
Parent.objects.create(name="parent")
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
)
name_input = self.selenium.find_element(By.ID, "id_form-0-name")
name_input.clear()
name_input.send_keys("other name")
Select(self.selenium.find_element(By.NAME, "action")).select_by_value(
"delete_selected"
)
self.selenium.find_element(By.NAME, "_save").click()
alert = self.selenium.switch_to.alert
try:
self.assertEqual(
alert.text,
"You have selected an action, but you haven’t saved your "
"changes to individual fields yet. Please click OK to save. "
"You’ll need to re-run the action.",
)
finally:
alert.dismiss()
def test_save_without_changes_warns_on_pending_action(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
Parent.objects.create(name="parent")
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
)
Select(self.selenium.find_element(By.NAME, "action")).select_by_value(
"delete_selected"
)
self.selenium.find_element(By.NAME, "_save").click()
alert = self.selenium.switch_to.alert
try:
self.assertEqual(
alert.text,
"You have selected an action, and you haven’t made any "
"changes on individual fields. You’re probably looking for "
"the Go button rather than the Save button.",
)
finally:
alert.dismiss()
def test_collapse_filters(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))
# The UserAdmin has 3 field filters by default: "staff status",
# "superuser status", and "active".
details = self.selenium.find_elements(By.CSS_SELECTOR, "details")
# All filters are opened at first.
for detail in details:
self.assertTrue(detail.get_attribute("open"))
# Collapse "staff' and "superuser" filters.
for detail in details[:2]:
summary = detail.find_element(By.CSS_SELECTOR, "summary")
summary.click()
self.assertFalse(detail.get_attribute("open"))
# Filters are in the same state after refresh.
self.selenium.refresh()
self.assertFalse(
self.selenium.find_element(
By.CSS_SELECTOR, "[data-filter-title='staff status']"
).get_attribute("open")
)
self.assertFalse(
self.selenium.find_element(
By.CSS_SELECTOR, "[data-filter-title='superuser status']"
).get_attribute("open")
)
self.assertTrue(
self.selenium.find_element(
By.CSS_SELECTOR, "[data-filter-title='active']"
).get_attribute("open")
)
# Collapse a filter on another view (Bands).
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_band_changelist")
)
self.selenium.find_element(By.CSS_SELECTOR, "summary").click()
# Go to Users view and then, back again to Bands view.
self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))
self.selenium.get(
self.live_server_url + reverse("admin:admin_changelist_band_changelist")
)
# The filter remains in the same state.
self.assertFalse(
self.selenium.find_element(
By.CSS_SELECTOR,
"[data-filter-title='number of members']",
).get_attribute("open")
)
79e601d327e418b8bfc6444f0bf6c313272845de17cf90294f78e78ec5f12e89
import datetime
import math
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg,
Case,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
F,
FloatField,
IntegerField,
Max,
Min,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
Value,
Variance,
When,
)
from django.db.models.expressions import Func, RawSQL
from django.db.models.functions import (
Cast,
Coalesce,
Greatest,
Now,
Pi,
TruncDate,
TruncHour,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
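# A minimal Now() variant for these tests: it renders CURRENT_TIMESTAMP
# by default but lets backends override the SQL through the
# test_now_utc_template feature flag.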
class NowUTC(Now):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
def as_sql(self, compiler, connection, **extra_context):
if connection.features.test_now_utc_template:
extra_context["template"] = connection.features.test_now_utc_template
return super().as_sql(compiler, connection, **extra_context)
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.p2 = Publisher.objects.create(
name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
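# Quick reference for the aggregates below: 9 authors whose ages sum to
# 337 (mean ~37.4), 6 books, 5 publishers, and 3 stores.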
def test_empty_aggregate(self):
self.assertEqual(Author.objects.aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
"Using an aggregate in order_by() without also including it in "
"annotate() is not allowed: Avg(F(book__rating)"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values("age").order_by(Avg("book__rating"))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(
vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
)
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {"age__sum": 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {"book__rating__avg": 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {"publisher__num_awards__sum": 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {"books__authors__age__max": 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(
amazon_mean=Avg("books__rating")
)
self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
def test_aggregate_transform(self):
vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
self.assertEqual(vals, {"min_month": 3})
def test_aggregate_join_transform(self):
vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
],
lambda b: b.name,
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name, "The Definitive Guide to Django: Web Development Done Right"
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = (
Book.objects.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = (
Book.objects.select_related("contact")
.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
)
def test_annotate_m2m(self):
books = (
Book.objects.filter(rating__lt=4.5)
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
("Practical Django Projects", 29.0),
("Python Web Development with Django", Approximate(30.3, places=1)),
("Sams Teach Yourself Django in 24 Hours", 45.0),
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
lambda b: (b.name, b.num_authors),
)
def test_backwards_m2m_annotate(self):
authors = (
Author.objects.filter(name__contains="a")
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 4.5),
("Brad Dayley", 3.0),
("Jacob Kaplan-Moss", 4.5),
("James Bennett", 4.0),
("Paul Bissex", 4.0),
("Stuart Russell", 4.0),
],
lambda a: (a.name, a.book__rating__avg),
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Peter Norvig", 2),
("Stuart Russell", 1),
("Wesley J. Chun", 1),
],
lambda a: (a.name, a.num_books),
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
9,
),
("Practical Django Projects", 3),
("Python Web Development with Django", 7),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 3),
],
lambda b: (b.name, b.publisher__num_awards__sum),
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers,
[
("Apress", Decimal("59.69")),
("Jonno's House of Books", None),
("Morgan Kaufmann", Decimal("75.00")),
("Prentice Hall", Decimal("112.49")),
("Sams", Decimal("23.09")),
],
lambda p: (p.name, p.book__price__sum),
)
def test_annotate_values(self):
books = list(
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values()
)
self.assertEqual(
books,
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books),
[
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("name")
)
self.assertEqual(
list(books),
[{"name": "The Definitive Guide to Django: Web Development Done Right"}],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.values()
.annotate(mean_age=Avg("authors__age"))
)
self.assertEqual(
list(books),
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books),
[
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1),
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
},
],
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 32.0),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 29.5),
("James Bennett", 34.0),
("Jeffrey Forcier", 27.0),
("Paul Bissex", 31.0),
("Peter Norvig", 46.0),
("Stuart Russell", 57.0),
("Wesley J. Chun", Approximate(33.66, places=1)),
],
lambda a: (a.name, a.friends__age__avg),
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
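# Count("*") must compile to COUNT(*) rather than counting a specific
# column, so NULL values in any column can't drop rows from the count.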
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating", "xprice")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(
list(qs),
[
{"rating": 4.0, "count": 1},
{"rating": 4.0, "count": 2},
],
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
def test_annotate_ordering(self):
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "rating")
)
self.assertEqual(
list(books),
[
{"rating": 4.5, "oldest": 35},
{"rating": 3.0, "oldest": 45},
{"rating": 4.0, "oldest": 57},
{"rating": 5.0, "oldest": 57},
],
)
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("-oldest", "-rating")
)
self.assertEqual(
list(books),
[
{"rating": 5.0, "oldest": 57},
{"rating": 4.0, "oldest": 57},
{"rating": 3.0, "oldest": 45},
{"rating": 4.5, "oldest": 35},
],
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
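# distinct() de-duplicates the joined rows before the aggregate runs. The
# join against two books repeats one author, so without distinct() that
# author's age would be summed twice; with it, the two distinct authors'
# ages are summed once each: 57 + 46 = 103.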
def test_filtering(self):
p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
Book.objects.create(
name="ExpensiveBook1",
pages=1,
isbn="111",
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1),
)
Book.objects.create(
name="ExpensiveBook2",
pages=1,
isbn="222",
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2),
)
Book.objects.create(
name="ExpensiveBook3",
pages=1,
isbn="333",
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3),
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerysetEqual(
publishers,
[
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
[
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
num_books__isnull=True
)
self.assertEqual(len(publishers), 0)
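# The relative order of filter() and annotate() matters, as exercised above:
# filtering on a related field *before* annotate() restricts which joined
# rows are counted (a WHERE on the join), while filtering on the annotation
# *after* annotate() constrains the aggregated result (a HAVING clause). A
# minimal sketch of the two shapes (illustrative only):
#
#     # WHERE first: count only the sub-$40 books, then require > 1 of them.
#     Publisher.objects.filter(book__price__lt=40).annotate(
#         num_books=Count("book__id")
#     ).filter(num_books__gt=1)
#
#     # HAVING on the full per-publisher count, price filter applied too.
#     Publisher.objects.annotate(num_books=Count("book__id")).filter(
#         num_books__gt=1, book__price__lt=40
#     )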
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = (
Book.objects.annotate(num_authors=Count("authors__name"))
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerysetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name,
)
authors = (
Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerysetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains="Norvig")
b = Book.objects.get(name__contains="Done Right")
b.authors.add(a)
b.save()
vals = (
Book.objects.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = (
Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
)
.exclude(earliest_book=None)
.order_by("earliest_book")
.values(
"earliest_book",
"num_awards",
"id",
"name",
)
)
self.assertEqual(
list(publishers),
[
{
"earliest_book": datetime.date(1991, 10, 15),
"num_awards": 9,
"id": self.p4.id,
"name": "Morgan Kaufmann",
},
{
"earliest_book": datetime.date(1995, 1, 15),
"num_awards": 7,
"id": self.p3.id,
"name": "Prentice Hall",
},
{
"earliest_book": datetime.date(2007, 12, 6),
"num_awards": 3,
"id": self.p1.id,
"name": "Apress",
},
{
"earliest_book": datetime.date(2008, 3, 3),
"num_awards": 1,
"id": self.p2.id,
"name": "Sams",
},
],
)
vals = Store.objects.aggregate(
Max("friday_night_closing"), Min("original_opening")
)
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
)
def test_annotate_values_list(self):
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("isbn")
)
self.assertEqual(list(books), [("159059725",)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age")
)
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = (
Book.objects.values_list("price")
.annotate(count=Count("price"))
.order_by("-count", "price")
)
self.assertEqual(
list(books),
[
(Decimal("29.69"), 2),
(Decimal("23.09"), 1),
(Decimal("30"), 1),
(Decimal("75"), 1),
(Decimal("82.8"), 1),
],
)
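# As with values(), calling values_list() before annotate() makes the listed
# fields the GROUP BY, so the last queryset above yields one (price, count)
# pair per distinct price rather than one row per book.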
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates(
"pubdate", "year"
)
self.assertSequenceEqual(
dates,
[
datetime.date(1991, 1, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 1, 1),
datetime.date(2008, 1, 1),
],
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
# The value doesn't matter; we just need any negative
# constraint on a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
select_related() stuff.
"""
qs = (
Book.objects.select_for_update()
.order_by("pk")
.select_related("publisher")
.annotate(max_pk=Max("pk"))
)
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg("max_pk"))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"].lower()
self.assertNotIn("for update", qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r"order by (\w+)", qstr),
[", ".join(f[1][0] for f in forced_ordering).lower()],
)
else:
self.assertNotIn("order by", qstr)
self.assertEqual(qstr.count(" join "), 0)
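# Implementation note: aggregate() on an annotated queryset pushes the
# existing query into a subquery and aggregates over it, which is why the
# assertions above can demand that ORDER BY, FOR UPDATE, and the
# select_related() JOIN all disappear; none of them can affect the outer
# aggregate. Roughly:
#
#     SELECT AVG(max_pk) FROM (SELECT ..., MAX(pk) AS max_pk ...) subquery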
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i),
name="none",
pages=10,
rating=4.0,
price=9999.98,
contact=a1,
publisher=p1,
pubdate=thedate,
)
book = Book.objects.aggregate(price_sum=Sum("price"))
self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_annotation_expressions(self):
authors = Author.objects.annotate(
combined_ages=Sum(F("age") + F("friends__age"))
).order_by("name")
authors2 = Author.objects.annotate(
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerysetEqual(
qs,
[
("Adrian Holovaty", 132),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 129),
("James Bennett", 63),
("Jeffrey Forcier", 128),
("Paul Bissex", 120),
("Peter Norvig", 103),
("Stuart Russell", 103),
("Wesley J. Chun", 176),
],
lambda a: (a.name, a.combined_ages),
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
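# Why 37 rather than 37.4: Sum("age") and Count() both resolve to integer
# output fields, so the combined expression is integer-typed and the
# database performs integer division, truncating the fraction. Avg("age")
# resolves to a FloatField and keeps it.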
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price"))["avg_price"]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
def test_combine_different_types(self):
msg = (
"Cannot infer type of '+' expression involving these types: FloatField, "
"DecimalField. You must set output_field."
)
qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Author.objects.annotate(Sum(F("age") + F("friends__age")))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum("age") / Count("age"))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
age = qs.aggregate(max_combined_age=Max("combined_ages"))
self.assertEqual(age["max_combined_age"], 176)
age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age=Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age"], 954)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
def test_values_annotation_with_expression(self):
# Ensure the F() is promoted to the GROUP BY clause.
qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["another_age"], 68)
qs = qs.annotate(friend_count=Count("friends"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["friend_count"], 2)
qs = (
qs.annotate(combined_age=Sum("age") + F("friends__age"))
.filter(name="Adrian Holovaty")
.order_by("-combined_age")
)
self.assertEqual(
list(qs),
[
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 69,
},
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 63,
},
],
)
vals = qs.values("name", "combined_age")
self.assertEqual(
list(vals),
[
{"name": "Adrian Holovaty", "combined_age": 69},
{"name": "Adrian Holovaty", "combined_age": 63},
],
)
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_aggregate_over_aggregate(self):
msg = "Cannot compute Avg('age'): 'age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Author.objects.annotate(age_alias=F("age")).aggregate(
age=Sum(F("age")),
avg_age=Avg(F("age")),
)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(
FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(
FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Book.objects.aggregate(MyMax("pages", "price"))
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Book.objects.annotate(MyMax("pages", "price"))
Book.objects.aggregate(max_field=MyMax("pages", "price"))
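# Multi-argument aggregates count as "complex" for aliasing: the default
# alias is derived from a single source field name (e.g. "pages__max"),
# which is ambiguous with two source expressions, so an explicit alias is
# required just as for arithmetic combinations of aggregates.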
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {
"function": self.function.lower(),
"expressions": sql,
"distinct": "",
}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra["function"] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {"function": "MAX", "expressions": "2", "distinct": ""}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, "as_" + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("MAX("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
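# The hook exercised above is vendor dispatch: when compiling an expression,
# the SQL compiler looks for an "as_<vendor>" method (as_sqlite,
# as_postgresql, as_mysql, as_oracle) before falling back to as_sql(), so
# setattr(MySum, "as_" + connection.vendor, ...) overrides rendering only
# for the backend the suite is currently running on.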
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
def test_expression_on_aggregation(self):
qs = (
Publisher.objects.annotate(
price_or_median=Greatest(
Avg("book__rating", output_field=DecimalField()), Avg("book__price")
)
)
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
rating_or_num_awards=Greatest(
Avg("book__rating"), F("num_awards"), output_field=FloatField()
)
)
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(FloatField()), "True"])
):
Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
def test_aggregation_subquery_annotation(self):
"""Subquery annotations are excluded from the GROUP BY if they are
not explicitly grouped against."""
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
).annotate(count=Count("book"))
with self.assertNumQueries(1) as ctx:
list(publisher_qs)
self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
# The GROUP BY should not be by alias either.
self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
def test_aggregation_subquery_annotation_exists(self):
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
count=Count("book"),
)
self.assertTrue(publisher_qs.exists())
def test_aggregation_filter_exists(self):
publishers_having_more_than_one_book_qs = (
Book.objects.values("publisher")
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
query = publishers_having_more_than_one_book_qs.query.exists()
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
def test_aggregation_exists_annotation(self):
published_books = Book.objects.filter(publisher=OuterRef("pk"))
publisher_qs = Publisher.objects.annotate(
published_book=Exists(published_books),
count=Count("book"),
).values_list("name", flat=True)
self.assertCountEqual(
list(publisher_qs),
[
"Apress",
"Morgan Kaufmann",
"Jonno's House of Books",
"Prentice Hall",
"Sams",
],
)
def test_aggregation_subquery_annotation_values(self):
"""
Subquery annotations and external aliases are excluded from the GROUP
BY if they are not selected.
"""
books_qs = (
Book.objects.annotate(
first_author_the_same_age=Subquery(
Author.objects.filter(
age=OuterRef("contact__friends__age"),
)
.order_by("age")
.values("id")[:1],
)
)
.filter(
publisher=self.p1,
first_author_the_same_age__isnull=False,
)
.annotate(
min_age=Min("contact__friends__age"),
)
.values("name", "min_age")
.order_by("name")
)
self.assertEqual(
list(books_qs),
[
{"name": "Practical Django Projects", "min_age": 34},
{
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"min_age": 29,
},
],
)
def test_aggregation_subquery_annotation_values_collision(self):
books_rating_qs = Book.objects.filter(
publisher=OuterRef("pk"),
price=Decimal("29.69"),
).values("rating")
publisher_qs = (
Publisher.objects.filter(
book__contact__age__gt=20,
name=self.p1.name,
)
.annotate(
rating=Subquery(books_rating_qs),
contacts_count=Count("book__contact"),
)
.values("rating")
.annotate(total_count=Count("rating"))
)
self.assertEqual(
list(publisher_qs),
[
{"rating": 4.0, "total_count": 2},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_multivalued(self):
"""
Subquery annotations must be included in the GROUP BY if they use
potentially multivalued relations (contain the LOOKUP_SEP).
"""
subquery_qs = Author.objects.filter(
pk=OuterRef("pk"),
book__name=OuterRef("book__name"),
).values("pk")
author_qs = Author.objects.annotate(
subquery_id=Subquery(subquery_qs),
).annotate(count=Count("book"))
self.assertEqual(author_qs.count(), Author.objects.count())
def test_aggregation_order_by_not_selected_annotation_values(self):
result_asc = [
self.b4.pk,
self.b3.pk,
self.b1.pk,
self.b2.pk,
self.b5.pk,
self.b6.pk,
]
result_desc = result_asc[::-1]
tests = [
("min_related_age", result_asc),
("-min_related_age", result_desc),
(F("min_related_age"), result_asc),
(F("min_related_age").asc(), result_asc),
(F("min_related_age").desc(), result_desc),
]
for ordering, expected_result in tests:
with self.subTest(ordering=ordering):
books_qs = (
Book.objects.annotate(
min_age=Min("authors__age"),
)
.annotate(
min_related_age=Coalesce("min_age", "contact__age"),
)
.order_by(ordering)
.values_list("pk", flat=True)
)
self.assertEqual(list(books_qs), expected_result)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_subquery_annotation(self):
"""
Subquery annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_count_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
groups = [
Subquery(long_books_count_qs),
long_books_count_qs,
long_books_count_qs.query,
]
for group in groups:
with self.subTest(group=group.__class__.__name__):
long_books_count_breakdown = Publisher.objects.values_list(
group,
).annotate(total=Count("*"))
self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_exists_annotation(self):
"""
Exists annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_qs = Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=800,
)
has_long_books_breakdown = Publisher.objects.values_list(
Exists(long_books_qs),
).annotate(total=Count("*"))
self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_related_field(self):
publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
book = Book.objects.create(
isbn="159059999",
name="Test book.",
pages=819,
rating=2.5,
price=Decimal("14.44"),
contact=self.a9,
publisher=publisher,
pubdate=datetime.date(2019, 12, 6),
)
book.authors.add(self.a5, self.a6, self.a7)
books_qs = (
Book.objects.annotate(
contact_publisher=Subquery(
Publisher.objects.filter(
pk=OuterRef("publisher"),
name=OuterRef("contact__name"),
).values("name")[:1],
)
)
.filter(
contact_publisher__isnull=False,
)
.annotate(count=Count("authors"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(books_qs, [book])
# One SELECT each for the outer query, the annotation, and the WHERE
# subquery; the GROUP BY references the selected alias when the backend
# allows it.
if connection.features.allows_group_by_refs:
self.assertEqual(ctx[0]["sql"].count("SELECT"), 3)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
publisher_with_same_name = Publisher.objects.filter(
id__in=Subquery(
Publisher.objects.filter(
name=OuterRef(OuterRef("publisher__name")),
).values("id"),
),
).values(publisher_count=Count("id"))[:1]
books_breakdown = Book.objects.annotate(
publisher_count=Subquery(publisher_with_same_name),
authors_count=Count("authors"),
).values_list("publisher_count", flat=True)
self.assertSequenceEqual(books_breakdown, [1] * 6)
def test_aggregation_exists_multivalued_outeref(self):
self.assertCountEqual(
Publisher.objects.annotate(
books_exists=Exists(
Book.objects.filter(publisher=OuterRef("book__publisher"))
),
books_count=Count("book"),
),
Publisher.objects.all(),
)
def test_filter_in_subquery_or_aggregation(self):
"""
Filtering against an aggregate requires the usage of the HAVING clause.
If such a filter is OR'ed with a non-aggregate one, the latter will
also need to be moved to the HAVING clause and have its grouping
columns used in the GROUP BY.
When this is done with a subquery the specialized logic in charge of
using outer reference columns to group should be used instead of the
subquery itself as the latter might return multiple rows.
"""
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertQuerysetEqual(authors, Author.objects.all(), ordered=False)
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerysetEqual(
authors,
[
("Adrian Holovaty", 1),
("Jacob Kaplan-Moss", 1),
("Brad Dayley", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Wesley J. Chun", 1),
("Stuart Russell", 1),
("Peter Norvig", 2),
],
lambda a: (a.name, a.contact_count),
ordered=False,
)
def test_empty_result_optimization(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=Count("book"),
),
{
"sum_awards": None,
"books_count": 0,
},
)
# Expression without empty_result_set_value forces queries to be
# executed even if they would return an empty result set.
raw_books_count = Func("book", function="COUNT")
raw_books_count.contains_aggregate = True
with self.assertNumQueries(1):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=raw_books_count,
),
{
"sum_awards": None,
"books_count": 0,
},
)
def test_coalesced_empty_result_set(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), 0),
)["sum_awards"],
0,
)
# Multiple expressions.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), None, 0),
)["sum_awards"],
0,
)
# Nested coalesce.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
)["sum_awards"],
0,
)
# Expression coalesce.
with self.assertNumQueries(1):
self.assertIsInstance(
Store.objects.none().aggregate(
latest_opening=Coalesce(
Max("original_opening"),
RawSQL("CURRENT_TIMESTAMP", []),
),
)["latest_opening"],
datetime.datetime,
)
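# Both tests above hinge on empty_result_set_value: built-in aggregates
# declare the value they would produce over zero rows (None for Sum, 0 for
# Count), letting aggregate() on a .none() queryset be answered without
# touching the database. Coalesce joins in by resolving to the first of its
# arguments that provides a usable value, while an expression that declares
# none (the raw Func earlier, or RawSQL here) forces a real query.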
def test_aggregation_default_unsupported_by_count(self):
msg = "Count does not allow default."
with self.assertRaisesMessage(TypeError, msg):
Count("age", default=0)
def test_aggregation_default_unset(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age"),
)
self.assertIsNone(result["value"])
def test_aggregation_default_zero(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=0),
)
self.assertEqual(result["value"], 0)
def test_aggregation_default_integer(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=21),
)
self.assertEqual(result["value"], 21)
def test_aggregation_default_expression(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=Value(5) * Value(7)),
)
self.assertEqual(result["value"], 35)
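# The default argument used throughout these tests is implemented as a
# Coalesce wrapper, i.e. Sum("age", default=0) behaves like
# Coalesce(Sum("age"), 0). That is also why Count rejects it: Count already
# returns 0 rather than NULL for empty input, so a default would be dead
# code.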
def test_aggregation_default_group_by(self):
qs = (
Publisher.objects.values("name")
.annotate(
books=Count("book"),
pages=Sum("book__pages", default=0),
)
.filter(books=0)
)
self.assertSequenceEqual(
qs,
[{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
)
def test_aggregation_default_compound_expression(self):
# Scale rating to a percentage; default to 50% if no books published.
formula = Avg("book__rating", default=2.5) * 20.0
queryset = Publisher.objects.annotate(rating=formula).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "rating"),
[
{"name": "Apress", "rating": 85.0},
{"name": "Jonno's House of Books", "rating": 50.0},
{"name": "Morgan Kaufmann", "rating": 100.0},
{"name": "Prentice Hall", "rating": 80.0},
{"name": "Sams", "rating": 60.0},
],
)
def test_aggregation_default_using_time_from_python(self):
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=datetime.time(17),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, TimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_time_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=TimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_date_from_python(self):
expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateField())
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{
"name": "Jonno's House of Books",
"earliest_pubdate": datetime.date(1970, 1, 1),
},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_date_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min("book__pubdate", default=TruncDate(NowUTC()))
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_datetime_from_python(self):
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=datetime.datetime(1970, 1, 1),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateTimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": datetime.datetime(1970, 1, 1),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_datetime_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=DateTimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": now.replace(
minute=0, second=0, microsecond=0, tzinfo=None
),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_duration_from_python(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=datetime.timedelta(0)),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_duration_from_database(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=Now() - Now()),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_decimal_from_python(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Decimal("0.00")),
)
self.assertEqual(result["value"], Decimal("0.00"))
def test_aggregation_default_using_decimal_from_database(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Pi()),
)
self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
def test_aggregation_default_passed_another_aggregate(self):
result = Book.objects.aggregate(
value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
)
self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
def test_aggregation_default_after_annotation(self):
result = Publisher.objects.annotate(
double_num_awards=F("num_awards") * 2,
).aggregate(value=Sum("double_num_awards", default=0))
self.assertEqual(result["value"], 40)
def test_aggregation_default_not_in_aggregate(self):
result = Publisher.objects.annotate(
avg_rating=Avg("book__rating", default=2.5),
).aggregate(Sum("num_awards"))
self.assertEqual(result["num_awards__sum"], 20)
def test_exists_none_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.none()),
)
self.assertEqual(len(qs), 6)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "aggregation_author"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.aggregate(**{crafted_alias: Avg("age")})
def test_exists_extra_where_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.extra(where=["1=0"])),
)
self.assertEqual(len(qs), 6)
6a5ab6bf85a77a810bae3ffbe3cdeebbf92c423230c6bc8e0bca3afcaa6431ac | import datetime
import os
import re
import unittest
from unittest import mock
from urllib.parse import parse_qsl, urljoin, urlparse
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib import admin
from django.contrib.admin import AdminSite, ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import ADDITION, DELETION, LogEntry
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME, get_permission_codename
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.db import connection
from django.forms.utils import ErrorList
from django.template.response import TemplateResponse
from django.test import (
TestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import override_script_prefix
from django.urls import NoReverseMatch, resolve, reverse
from django.utils import formats, translation
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from . import customadmin
from .admin import CityAdmin, site, site2
from .models import (
Actor,
AdminOrderedAdminMethod,
AdminOrderedCallable,
AdminOrderedField,
AdminOrderedModelMethod,
Album,
Answer,
Answer2,
Article,
BarAccount,
Book,
Bookmark,
Box,
Category,
Chapter,
ChapterXtra1,
ChapterXtra2,
Character,
Child,
Choice,
City,
Collector,
Color,
ComplexSortedPerson,
CoverLetter,
CustomArticle,
CyclicOne,
CyclicTwo,
DooHickey,
Employee,
EmptyModel,
Fabric,
FancyDoodad,
FieldOverridePost,
FilteredManager,
FooAccount,
FoodDelivery,
FunkyTag,
Gallery,
Grommet,
Inquisition,
Language,
Link,
MainPrepopulated,
Media,
ModelWithStringPrimaryKey,
OtherStory,
Paper,
Parent,
ParentWithDependentChildren,
ParentWithUUIDPK,
Person,
Persona,
Picture,
Pizza,
Plot,
PlotDetails,
PluggableSearchPerson,
Podcast,
Post,
PrePopulatedPost,
Promo,
Question,
ReadablePizza,
ReadOnlyPizza,
ReadOnlyRelatedField,
Recommendation,
Recommender,
RelatedPrepopulated,
RelatedWithUUIDPKModel,
Report,
Restaurant,
RowLevelChangePermissionModel,
SecretHideout,
Section,
ShortMessage,
Simple,
Song,
State,
Story,
SuperSecretHideout,
SuperVillain,
Telegram,
TitleTranslation,
Topping,
Traveler,
UnchangeableObject,
UndeletableObject,
UnorderedObject,
UserProxy,
Villain,
Vodcast,
Whatsit,
Widget,
Worker,
WorkHour,
)
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
MULTIPART_ENCTYPE = 'enctype="multipart/form-data"'
def make_aware_datetimes(dt, iana_key):
"""Makes one aware datetime for each supported time zone provider."""
yield dt.replace(tzinfo=zoneinfo.ZoneInfo(iana_key))
if pytz is not None:
yield pytz.timezone(iana_key).localize(dt, is_dst=None)
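# Illustrative usage (hypothetical values): iterating the generator runs an
# assertion once per installed provider, covering zoneinfo and, when
# available, pytz:
#
#     for dt in make_aware_datetimes(datetime.datetime(2020, 1, 1), "UTC"):
#         ...  # exercise the code under test with each aware datetime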
class AdminFieldExtractionMixin:
"""
Helper methods for extracting data from AdminForm.
"""
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
fields = []
for fieldset in response.context["adminform"]:
for field_line in fieldset:
fields.extend(field_line)
return fields
def get_admin_readonly_fields(self, response):
"""
Return the readonly fields for the response's AdminForm.
"""
return [f for f in self.get_admin_form_fields(response) if f.is_readonly]
def get_admin_readonly_field(self, response, field_name):
"""
Return the readonly field for the given field_name.
"""
admin_readonly_fields = self.get_admin_readonly_fields(response)
for field in admin_readonly_fields:
if field.field["name"] == field_name:
return field
@override_settings(ROOT_URLCONF="admin_views.urls", USE_I18N=True, LANGUAGE_CODE="en")
class AdminViewBasicTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 1",
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 2",
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.color1 = Color.objects.create(value="Red", warm=True)
cls.color2 = Color.objects.create(value="Orange", warm=True)
cls.color3 = Color.objects.create(value="Blue", warm=False)
cls.color4 = Color.objects.create(value="Green", warm=False)
cls.fab1 = Fabric.objects.create(surface="x")
cls.fab2 = Fabric.objects.create(surface="y")
cls.fab3 = Fabric.objects.create(surface="plain")
cls.b1 = Book.objects.create(name="Book 1")
cls.b2 = Book.objects.create(name="Book 2")
cls.pro1 = Promo.objects.create(name="Promo 1", book=cls.b1)
cls.pro1 = Promo.objects.create(name="Promo 2", book=cls.b2)
cls.chap1 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b1
)
cls.chap3 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b2
)
cls.chap4 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b2
)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra="ChapterXtra1 1")
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra="ChapterXtra1 2")
Actor.objects.create(name="Palin", age=27)
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
# There is no title in the database; give one here or the formset will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
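# The article_set-* keys follow Django's formset protocol: the management
# form fields (TOTAL_FORMS, INITIAL_FORMS, MAX_NUM_FORMS) describe the
# formset as a whole, and each form's fields are prefixed with
# "<prefix>-<index>-". Forms 3-5 are submitted blank to stand in for the
# unused extra inline forms.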
def setUp(self):
self.client.force_login(self.superuser)
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(
response.content.index(text1.encode()),
response.content.index(text2.encode()),
(failing_msg or "")
+ "\nResponse:\n"
+ response.content.decode(response.charset),
)
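# assertContentBefore is the workhorse of the sorting tests below: instead
# of parsing the changelist table, they assert that the marker for one row
# occurs earlier in the raw response body than the marker for another.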
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
If you leave off the trailing slash, the app should redirect and add it.
"""
add_url = reverse("admin:admin_views_article_add")
response = self.client.get(add_url[:-1])
self.assertRedirects(response, add_url, status_code=301)
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get(
reverse("admin:admin_views_section_add"), {"name": "My Section"}
)
self.assertContains(
response,
'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response",
)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
GET on the change_view (when passing a string as the PK argument for a
model with an integer PK field) redirects to the index page with a
message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(quote("abc/<b>"),)),
follow=True,
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["section with ID “abc/<b>” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_edit_GET_old_url_redirect(self):
"""
The change URL changed in Django 1.9, but the old one still redirects.
"""
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)).replace(
"change/", ""
)
)
self.assertRedirects(
response, reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
def test_basic_inheritance_GET_string_PK(self):
"""
GET on the change_view (for inherited models) redirects to the index
page with a message saying the object doesn't exist.
"""
response = self.client.get(
reverse("admin:admin_views_supervillain_change", args=("abc",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["super villain with ID “abc” doesn’t exist. Perhaps it was deleted?"],
)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_section_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""HTTP response from a popup is properly escaped."""
post_data = {
IS_POPUP_VAR: "1",
"title": "title with a new\nline",
"content": "some content",
"date_0": "2010-09-10",
"date_1": "14:55:39",
}
response = self.client.post(reverse("admin:admin_views_article_add"), post_data)
self.assertContains(response, "title with a new\\nline")
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
url = reverse("admin:admin_views_section_change", args=(self.s1.pk,))
response = self.client.post(url, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as_delete_inline(self):
"""
Should be able to "Save as new" while also deleting an inline.
"""
post_data = self.inline_post_data.copy()
post_data.update(
{
"_saveasnew": "Save+as+new",
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-2-DELETE": "1",
"article_set-3-section": "1",
}
)
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
)
self.assertEqual(response.status_code, 302)
# Started with 3 articles; one was deleted.
self.assertEqual(Section.objects.latest("id").article_set.count(), 2)
def test_change_list_column_field_classes(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
# Callables display the callable name.
self.assertContains(response, "column-callable_year")
self.assertContains(response, "field-callable_year")
# Lambdas display as "lambda" plus the index at which they appear in list_display.
self.assertContains(response, "column-lambda8")
self.assertContains(response, "field-lambda8")
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_property(self):
"""
Sort on a list_display field that is a property (column 10 is
a property in Article model).
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 10}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on property are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on property are out of order.",
)
def test_change_list_sorting_callable_query_expression(self):
"""Query expressions may be used for admin_order_field."""
tests = [
("order_by_expression", 9),
("order_by_f_expression", 12),
("order_by_orderby_expression", 13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_callable_query_expression_reverse(self):
tests = [
("order_by_expression", -9),
("order_by_f_expression", -12),
("order_by_orderby_expression", -13),
]
for admin_order_field, index in tests:
with self.subTest(admin_order_field):
response = self.client.get(
reverse("admin:admin_views_article_changelist"),
{"o": index},
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-3"}
)
self.assertContentBefore(
response,
"Newest content",
"Middle content",
"Results of sorting on Model method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Oldest content",
"Results of sorting on Model method are out of order.",
)
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "4"}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
td = '<td class="field-model_property_year">%s</td>'
td_2000, td_2008, td_2009 = td % 2000, td % 2008, td % 2009
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "6"}
)
self.assertContentBefore(
response,
td_2009,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2000,
"Results of sorting on ModelAdmin method are out of order.",
)
# Make sure the ordering is right and that we don't get a
# FieldError when we change to descending order.
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": "-6"}
)
self.assertContentBefore(
response,
td_2000,
td_2008,
"Results of sorting on ModelAdmin method are out of order.",
)
self.assertContentBefore(
response,
td_2008,
td_2009,
"Results of sorting on ModelAdmin method are out of order.",
)
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
# Sort by name, gender
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "1.2"}
)
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get(
reverse("admin:admin_views_person_changelist"), {"o": "-2.1"}
)
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
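    # The "o" query parameter is a "."-separated list of column indexes as
    # rendered (0 is the implicit action checkbox); a "-" prefix reverses
    # that column. For example:
    #
    #     # Sort by column 2 descending, then column 1 ascending.
    #     self.client.get(changelist_url, {"o": "-2.1"})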
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
        changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse("admin:admin_views_person_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_person_change", args=(p2.pk,))
link3 = reverse("admin:admin_views_person_change", args=(p3.pk,))
response = self.client.get(reverse("admin:admin_views_person_changelist"), {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso="ur", name="Urdu")
l2 = Language.objects.create(iso="ar", name="Arabic")
link1 = reverse("admin:admin_views_language_change", args=(quote(l1.pk),))
link2 = reverse("admin:admin_views_language_change", args=(quote(l2.pk),))
response = self.client.get(reverse("admin:admin_views_language_changelist"), {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get(
reverse("admin:admin_views_language_changelist"), {"o": "-1"}
)
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_podcast_change", args=(p2.pk,))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# The changelist displays the correct columns if two columns correspond
# to the same ordering field.
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse("admin:admin_views_podcast_change", args=(quote(p1.pk),))
link2 = reverse("admin:admin_views_podcast_change", args=(quote(p2.pk),))
response = self.client.get(reverse("admin:admin_views_podcast_changelist"), {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse("admin:admin_views_complexsortedperson_change", args=(p1.pk,))
link2 = reverse("admin:admin_views_complexsortedperson_change", args=(p2.pk,))
response = self.client.get(
reverse("admin:admin_views_complexsortedperson_changelist"), {}
)
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, "Name")
self.assertContains(response, "Colored name")
# Check order
self.assertContentBefore(response, "Name", "Colored name")
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
The admin shows default sort indicators for all kinds of 'ordering'
fields: field names, method on the model admin and model itself, and
other callables. See #17252.
"""
models = [
(AdminOrderedField, "adminorderedfield"),
(AdminOrderedModelMethod, "adminorderedmodelmethod"),
(AdminOrderedAdminMethod, "adminorderedadminmethod"),
(AdminOrderedCallable, "adminorderedcallable"),
]
for model, url in models:
model.objects.create(stuff="The Last Item", order=3)
model.objects.create(stuff="The First Item", order=1)
model.objects.create(stuff="The Middle Item", order=2)
response = self.client.get(
reverse("admin:admin_views_%s_changelist" % url), {}
)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(
response.context["cl"].get_ordering_field_columns(), {2: "asc"}
)
# Check order of records.
self.assertContentBefore(response, "The First Item", "The Middle Item")
self.assertContentBefore(response, "The Middle Item", "The Last Item")
def test_has_related_field_in_list_display_fk(self):
"""Joins shouldn't be performed for <FK>_id fields in list display."""
state = State.objects.create(name="Karnataka")
City.objects.create(state=state, name="Bangalore")
response = self.client.get(reverse("admin:admin_views_city_changelist"), {})
response.context["cl"].list_display = ["id", "name", "state"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["id", "name", "state_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
def test_has_related_field_in_list_display_o2o(self):
"""Joins shouldn't be performed for <O2O>_id fields in list display."""
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["media_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
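    # Referencing the raw "<fk>_id" / "<o2o>_id" attribute in list_display
    # avoids the JOIN that the relation name itself would trigger; a minimal
    # sketch (the admin class name is an assumption):
    #
    #     class CityAdmin(admin.ModelAdmin):
    #         list_display = ["id", "name", "state_id"]  # no JOIN on state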
def test_limited_filter(self):
"""
Admin changelist filters do not contain objects excluded via
limit_choices_to.
"""
response = self.client.get(reverse("admin:admin_views_thing_changelist"))
self.assertContains(
response,
'<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view",
)
self.assertNotContains(
response,
'<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to",
)
def test_relation_spanning_filters(self):
changelist_url = reverse("admin:admin_views_chapterxtra1_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, '<div id="changelist-filter">')
filters = {
"chap__id__exact": {
"values": [c.id for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.id == value,
},
"chap__title": {
"values": [c.title for c in Chapter.objects.all()],
"test": lambda obj, value: obj.chap.title == value,
},
"chap__book__id__exact": {
"values": [b.id for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.id == value,
},
"chap__book__name": {
"values": [b.name for b in Book.objects.all()],
"test": lambda obj, value: obj.chap.book.name == value,
},
"chap__book__promo__id__exact": {
"values": [p.id for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
id=value
).exists(),
},
"chap__book__promo__name": {
"values": [p.name for p in Promo.objects.all()],
"test": lambda obj, value: obj.chap.book.promo_set.filter(
name=value
).exists(),
},
# A forward relation (book) after a reverse relation (promo).
"guest_author__promo__book__id__exact": {
"values": [p.id for p in Book.objects.all()],
"test": lambda obj, value: obj.guest_author.promo_set.filter(
book=value
).exists(),
},
}
for filter_path, params in filters.items():
for value in params["values"]:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s"' % query_string)
# ensure link works
filtered_response = self.client.get(
"%s?%s" % (changelist_url, query_string)
)
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context["cl"].queryset.all():
self.assertTrue(params["test"](obj, value))
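    # list_filter entries may span relations with the "__" lookup syntax,
    # including reverse relations; a minimal sketch (the admin class name is
    # an assumption, the paths mirror the ones tested above):
    #
    #     class ChapterXtra1Admin(admin.ModelAdmin):
    #         list_filter = (
    #             "chap",
    #             "chap__title",
    #             "chap__book",
    #             "chap__book__name",
    #             "chap__book__promo",
    #             "chap__book__promo__name",
    #             "guest_author__promo__book",
    #         )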
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
changelist_url = reverse("admin:admin_views_thing_changelist")
response = self.client.get(changelist_url, {"notarealfield": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get(changelist_url, {"notarealfield__whatever": "5"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
response = self.client.get(
changelist_url, {"color__id__exact": "StringNotInteger!"}
)
self.assertRedirects(response, "%s?e=1" % changelist_url)
# Regression test for #18530
response = self.client.get(changelist_url, {"pub_date__gte": "foo"})
self.assertRedirects(response, "%s?e=1" % changelist_url)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(
title="I Could Go Anywhere",
content="Versatile",
date=datetime.datetime.now(),
)
changelist_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(changelist_url)
self.assertContains(response, "4 articles")
response = self.client.get(changelist_url, {"section__isnull": "false"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "0"})
self.assertContains(response, "3 articles")
response = self.client.get(changelist_url, {"section__isnull": "true"})
self.assertContains(response, "1 article")
response = self.client.get(changelist_url, {"section__isnull": "1"})
self.assertContains(response, "1 article")
def test_logout_and_password_change_URLs(self):
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(
response,
'<form id="logout-form" method="post" action="%s">'
% reverse("admin:logout"),
)
self.assertContains(
response, '<a href="%s">' % reverse("admin:password_change")
)
def test_named_group_field_choices_change_list(self):
"""
        The admin changelist shows the correct human-readable values in the
        relevant column for rows of a model whose field uses named groups
        in its 'choices' option.
"""
link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,))
link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,))
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist table isn't showing the right human-readable values "
"set by a model field 'choices' option named group."
)
self.assertContains(
response,
'<a href="%s">Horizontal</a>' % link1,
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="%s">Vertical</a>' % link2,
msg_prefix=fail_msg,
html=True,
)
def test_named_group_field_choices_filter(self):
"""
        The filter UI displays correctly when at least one named group is
        used in a model field's 'choices' option.
"""
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
fail_msg = (
"Changelist filter isn't showing options contained inside a model "
"field 'choices' option named group."
)
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(
response,
'<a href="?surface__exact=x">Horizontal</a>',
msg_prefix=fail_msg,
html=True,
)
self.assertContains(
response,
'<a href="?surface__exact=y">Vertical</a>',
msg_prefix=fail_msg,
html=True,
)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
response = self.client.get(reverse("admin:admin_views_post_changelist"))
self.assertContains(response, "icon-unknown.svg")
def test_display_decorator_with_boolean_and_empty_value(self):
msg = (
"The boolean and empty_value arguments to the @display decorator "
"are mutually exclusive."
)
with self.assertRaisesMessage(ValueError, msg):
class BookAdmin(admin.ModelAdmin):
@admin.display(boolean=True, empty_value="(Missing)")
def is_published(self, obj):
return obj.publish_date is not None
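    # For reference, boolean and empty_value are each valid on their own;
    # only the combination is rejected. A minimal sketch of valid usage
    # (names are illustrative):
    #
    #     @admin.display(boolean=True, description="Published?")
    #     def is_published(self, obj):
    #         return obj.publish_date is not None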
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("en-us"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "Choisir une heure")
def test_i18n_language_non_english_fallback(self):
"""
        The fallback language still works properly when the selected
        language cannot be found.
"""
with self.settings(LANGUAGE_CODE="fr"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertContains(response, "Choisir une heure")
def test_jsi18n_with_context(self):
response = self.client.get(reverse("admin-extra-context:jsi18n"))
self.assertEqual(response.status_code, 200)
def test_jsi18n_format_fallback(self):
"""
The JavaScript i18n view doesn't return localized date/time formats
when the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE="ru"), translation.override("none"):
response = self.client.get(reverse("admin:jsi18n"))
self.assertNotContains(response, "%d.%m.%Y %H:%M:%S")
self.assertContains(response, "%Y-%m-%d %H:%M:%S")
def test_disallowed_filtering(self):
with self.assertLogs("django.security.DisallowedModelAdminLookup", "ERROR"):
response = self.client.get(
"%s?owner__email__startswith=fuzzy"
% reverse("admin:admin_views_album_changelist")
)
self.assertEqual(response.status_code, 400)
# Filters are allowed if explicitly included in list_filter
response = self.client.get(
"%s?color__value__startswith=red"
% reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"%s?color__value=red" % reverse("admin:admin_views_thing_changelist")
)
self.assertEqual(response.status_code, 200)
        # Filters on local fields should be allowed without needing to be
        # listed in list_filter or date_hierarchy.
response = self.client.get(
"%s?age__gt=30" % reverse("admin:admin_views_person_changelist")
)
self.assertEqual(response.status_code, 200)
e1 = Employee.objects.create(
name="Anonymous", gender=1, age=22, alive=True, code="123"
)
e2 = Employee.objects.create(
name="Visitor", gender=2, age=19, alive=True, code="124"
)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
response = self.client.get(reverse("admin:admin_views_workhour_changelist"))
self.assertContains(response, "employee__person_ptr__exact")
response = self.client.get(
"%s?employee__person_ptr__exact=%d"
% (reverse("admin:admin_views_workhour_changelist"), e1.pk)
)
self.assertEqual(response.status_code, 200)
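    # Lookups that traverse relations must be sanctioned, typically by
    # appearing in list_filter; ModelAdmin.lookup_allowed() is the hook that
    # makes the decision. A minimal sketch (field names are assumptions):
    #
    #     class ThingAdmin(admin.ModelAdmin):
    #         list_filter = ("color", "color__value")  # allows ?color__value=red
    #
    # Lookups on local fields (e.g. age__gt) need no opt-in.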
def test_disallowed_to_field(self):
url = reverse("admin:admin_views_section_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "missing_field"})
self.assertEqual(response.status_code, 400)
        # Specifying a field that is not referred to by any other model
        # registered to this admin site should raise an exception.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(
reverse("admin:admin_views_section_changelist"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
# Primary key should always be allowed, even if the referenced model
# isn't registered.
response = self.client.get(
reverse("admin:admin_views_notreferenced_changelist"), {TO_FIELD_VAR: "id"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field referenced by another model through an m2m
        # should be allowed.
response = self.client.get(
reverse("admin:admin_views_recipe_changelist"), {TO_FIELD_VAR: "rname"}
)
self.assertEqual(response.status_code, 200)
# Specifying a field referenced through a reverse m2m relationship
# should be allowed.
response = self.client.get(
reverse("admin:admin_views_ingredient_changelist"), {TO_FIELD_VAR: "iname"}
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is not referred to by any other model
        # directly registered to this admin site, but registered through
        # inheritance, should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyparent_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # Specifying a field that is only referred to by an inline of a
# registered model should be allowed.
response = self.client.get(
reverse("admin:admin_views_referencedbyinline_changelist"),
{TO_FIELD_VAR: "name"},
)
self.assertEqual(response.status_code, 200)
        # #25622 - Specifying a field of a model only referred to by a
        # generic relation should raise DisallowedModelAdminToField.
url = reverse("admin:admin_views_referencedbygenrel_changelist")
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.get(url, {TO_FIELD_VAR: "object_id"})
self.assertEqual(response.status_code, 400)
# We also want to prevent the add, change, and delete views from
# leaking a disallowed field value.
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(
reverse("admin:admin_views_section_add"), {TO_FIELD_VAR: "name"}
)
self.assertEqual(response.status_code, 400)
section = Section.objects.create()
url = reverse("admin:admin_views_section_change", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
url = reverse("admin:admin_views_section_delete", args=(section.pk,))
with self.assertLogs("django.security.DisallowedModelAdminToField", "ERROR"):
response = self.client.post(url, {TO_FIELD_VAR: "name"})
self.assertEqual(response.status_code, 400)
def test_allowed_filtering_15103(self):
"""
        Regression test for ticket 15103 - filtering on fields defined in a
        ForeignKey's 'limit_choices_to' should be allowed; otherwise
        raw_id_fields can break.
"""
# Filters should be allowed if they are defined on a ForeignKey
# pointing to this model.
url = "%s?leader__name=Palin&leader__age=27" % reverse(
"admin:admin_views_inquisition_changelist"
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
Regression test for ticket 20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get(
"%s?%s" % (reverse("admin:admin_views_actor_changelist"), IS_POPUP_VAR)
)
self.assertContains(response, 'data-popup-opener="%s"' % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username="super")
user.set_unusable_password()
user.save()
self.client.force_login(user)
response = self.client.get(reverse("admin:index"))
self.assertNotContains(
response,
reverse("admin:password_change"),
msg_prefix=(
'The "change password" link should not be displayed if a user does not '
"have a usable password."
),
)
def test_change_view_with_show_delete_extra_context(self):
"""
The 'show_delete' context variable in the admin's change view controls
the display of the delete button.
"""
instance = UndeletableObject.objects.create(name="foo")
response = self.client.get(
reverse("admin:admin_views_undeletableobject_change", args=(instance.pk,))
)
self.assertNotContains(response, "deletelink")
def test_change_view_logs_m2m_field_changes(self):
"""Changes to ManyToManyFields are included in the object's history."""
pizza = ReadablePizza.objects.create(name="Cheese")
cheese = Topping.objects.create(name="cheese")
post_data = {"name": pizza.name, "toppings": [cheese.pk]}
response = self.client.post(
reverse("admin:admin_views_readablepizza_change", args=(pizza.pk,)),
post_data,
)
self.assertRedirects(
response, reverse("admin:admin_views_readablepizza_changelist")
)
pizza_ctype = ContentType.objects.get_for_model(
ReadablePizza, for_concrete_model=False
)
log = LogEntry.objects.filter(
content_type=pizza_ctype, object_id=pizza.pk
).first()
self.assertEqual(log.get_change_message(), "Changed Toppings.")
def test_allows_attributeerror_to_bubble_up(self):
"""
        AttributeErrors are allowed to bubble up when raised inside a change
view. Requires a model to be created so there's something to display.
Refs: #16655, #18593, and #18747
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get(reverse("admin:admin_views_simple_changelist"))
def test_changelist_with_no_change_url(self):
"""
        ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if
        the URL for change_view is removed from get_urls() (#20934).
"""
o = UnchangeableObject.objects.create()
response = self.client.get(
reverse("admin:admin_views_unchangeableobject_changelist")
)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(
response, '<th class="field-__str__">%s</th>' % o, html=True
)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = "/test_admin/admin/admin_views/"
confirm_good_url = reverse(
"admin:app_list", kwargs={"app_label": "admin_views"}
)
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", kwargs={"app_label": "this_should_fail"})
with self.assertRaises(NoReverseMatch):
reverse("admin:app_list", args=("admin_views2",))
def test_resolve_admin_views(self):
index_match = resolve("/test_admin/admin4/")
list_match = resolve("/test_admin/admin4/auth/user/")
self.assertIs(index_match.func.admin_site, customadmin.simple_site)
self.assertIsInstance(
list_match.func.model_admin, customadmin.CustomPwdTemplateUserAdmin
)
def test_adminsite_display_site_url(self):
"""
        #13749 - The admin should display a 'View site' link to the
        front-end site.
"""
url = reverse("admin:index")
response = self.client.get(url)
self.assertEqual(response.context["site_url"], "/my-site-url/")
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
def test_date_hierarchy_empty_queryset(self):
self.assertIs(Question.objects.exists(), False)
response = self.client.get(reverse("admin:admin_views_answer2_changelist"))
self.assertEqual(response.status_code, 200)
@override_settings(TIME_ZONE="America/Sao_Paulo", USE_TZ=True)
def test_date_hierarchy_timezone_dst(self):
# This datetime doesn't exist in this timezone due to DST.
for date in make_aware_datetimes(
datetime.datetime(2016, 10, 16, 15), "America/Sao_Paulo"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=16")
self.assertContains(response, "question__expires__month=10")
self.assertContains(response, "question__expires__year=2016")
@override_settings(TIME_ZONE="America/Los_Angeles", USE_TZ=True)
def test_date_hierarchy_local_date_differ_from_utc(self):
# This datetime is 2017-01-01 in UTC.
for date in make_aware_datetimes(
datetime.datetime(2016, 12, 31, 16), "America/Los_Angeles"
):
with self.subTest(repr(date.tzinfo)):
q = Question.objects.create(question="Why?", expires=date)
Answer2.objects.create(question=q, answer="Because.")
response = self.client.get(
reverse("admin:admin_views_answer2_changelist")
)
self.assertContains(response, "question__expires__day=31")
self.assertContains(response, "question__expires__month=12")
self.assertContains(response, "question__expires__year=2016")
def test_sortable_by_columns_subset(self):
expected_sortable_fields = ("date", "callable_year")
expected_not_sortable_fields = (
"content",
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
)
response = self.client.get(reverse("admin6:admin_views_article_changelist"))
for field_name in expected_sortable_fields:
self.assertContains(
response, '<th scope="col" class="sortable column-%s">' % field_name
)
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
def test_get_sortable_by_columns_subset(self):
response = self.client.get(reverse("admin6:admin_views_actor_changelist"))
self.assertContains(response, '<th scope="col" class="sortable column-age">')
self.assertContains(response, '<th scope="col" class="column-name">')
def test_sortable_by_no_column(self):
expected_not_sortable_fields = ("title", "book")
response = self.client.get(reverse("admin6:admin_views_chapter_changelist"))
for field_name in expected_not_sortable_fields:
self.assertContains(
response, '<th scope="col" class="column-%s">' % field_name
)
self.assertNotContains(response, '<th scope="col" class="sortable column')
def test_get_sortable_by_no_column(self):
response = self.client.get(reverse("admin6:admin_views_color_changelist"))
self.assertContains(response, '<th scope="col" class="column-value">')
self.assertNotContains(response, '<th scope="col" class="sortable column')
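    # ModelAdmin.sortable_by (or get_sortable_by()) whitelists the columns
    # rendered with sortable headers; an empty collection disables sorting
    # entirely. A minimal sketch (class names are assumptions):
    #
    #     class ArticleAdmin6(admin.ModelAdmin):
    #         sortable_by = ("date", "callable_year")
    #
    #     class ActorAdmin6(admin.ModelAdmin):
    #         def get_sortable_by(self, request):
    #             return {"age"}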
def test_app_index_context(self):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
self.assertEqual(response.context["title"], "Admin_Views administration")
self.assertEqual(response.context["app_label"], "admin_views")
# Models are sorted alphabetically by default.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models))
def test_app_index_context_reordered(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertContains(
response,
"<title>Admin_Views administration | Django site admin</title>",
)
# Models are in reverse order.
models = [model["name"] for model in response.context["app_list"][0]["models"]]
self.assertSequenceEqual(models, sorted(models, reverse=True))
def test_change_view_subtitle_per_object(self):
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_view_subtitle_per_object(self):
viewuser = User.objects.create_user(
username="viewuser",
password="secret",
is_staff=True,
)
viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", Article._meta)),
)
self.client.force_login(viewuser)
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a1.pk,)),
)
self.assertContains(
response,
"<title>Article 1 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 1</h2>")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(self.a2.pk,)),
)
self.assertContains(
response,
"<title>Article 2 | View article | Django site admin</title>",
)
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<h2>Article 2</h2>")
def test_formset_kwargs_can_be_overridden(self):
response = self.client.get(reverse("admin:admin_views_city_add"))
self.assertContains(response, "overridden_name")
def test_render_views_no_subtitle(self):
tests = [
reverse("admin:index"),
reverse("admin:password_change"),
reverse("admin:app_list", args=("admin_views",)),
reverse("admin:admin_views_article_delete", args=(self.a1.pk,)),
reverse("admin:admin_views_article_history", args=(self.a1.pk,)),
]
for url in tests:
with self.subTest(url=url):
with self.assertNoLogs("django.template", "DEBUG"):
self.client.get(url)
# Login must be after logout.
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:logout"))
self.client.get(reverse("admin:login"))
def test_render_delete_selected_confirmation_no_subtitle(self):
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": self.a1.pk,
}
with self.assertNoLogs("django.template", "DEBUG"):
self.client.post(reverse("admin:admin_views_article_changelist"), post_data)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation."
"NumericPasswordValidator"
)
},
]
)
def test_password_change_helptext(self):
response = self.client.get(reverse("admin:password_change"))
self.assertContains(
response, '<div class="help" id="id_new_password1_helptext">'
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation." "NumericPasswordValidator"
)
},
],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
# Put this app's and the shared tests templates dirs in DIRS to
# take precedence over the admin's templates dir.
"DIRS": [
os.path.join(os.path.dirname(__file__), "templates"),
os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_custom_model_admin_templates(self):
# Test custom change list template with custom extra context
response = self.client.get(
reverse("admin:admin_views_customarticle_changelist")
)
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, "custom_admin/change_list.html")
# Test custom add form template
response = self.client.get(reverse("admin:admin_views_customarticle_add"))
self.assertTemplateUsed(response, "custom_admin/add_form.html")
# Add an article so we can test delete, change, and history views
post = self.client.post(
reverse("admin:admin_views_customarticle_add"),
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
},
)
self.assertRedirects(
post, reverse("admin:admin_views_customarticle_changelist")
)
self.assertEqual(CustomArticle.objects.count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get(
reverse("admin:admin_views_customarticle_change", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/change_form.html")
response = self.client.get(
reverse("admin:admin_views_customarticle_delete", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/delete_confirmation.html")
response = self.client.post(
reverse("admin:admin_views_customarticle_changelist"),
data={
"index": 0,
"action": ["delete_selected"],
"_selected_action": ["1"],
},
)
self.assertTemplateUsed(
response, "custom_admin/delete_selected_confirmation.html"
)
response = self.client.get(
reverse("admin:admin_views_customarticle_history", args=(article_pk,))
)
self.assertTemplateUsed(response, "custom_admin/object_history.html")
# A custom popup response template may be specified by
# ModelAdmin.popup_response_template.
response = self.client.post(
reverse("admin:admin_views_customarticle_add") + "?%s=1" % IS_POPUP_VAR,
{
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
IS_POPUP_VAR: "1",
},
)
self.assertEqual(response.template_name, "custom_admin/popup_response.html")
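    # The templates exercised above map to per-view ModelAdmin attributes; a
    # minimal sketch (template paths are illustrative):
    #
    #     class CustomArticleAdmin(admin.ModelAdmin):
    #         change_list_template = "custom_admin/change_list.html"
    #         add_form_template = "custom_admin/add_form.html"
    #         change_form_template = "custom_admin/change_form.html"
    #         delete_confirmation_template = "custom_admin/delete_confirmation.html"
    #         delete_selected_confirmation_template = (
    #             "custom_admin/delete_selected_confirmation.html"
    #         )
    #         object_history_template = "custom_admin/object_history.html"
    #         popup_response_template = "custom_admin/popup_response.html"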
def test_extended_bodyclass_template_change_form(self):
"""
The admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_change_password_template(self):
user = User.objects.get(username="super")
response = self.client.get(
reverse("admin:auth_user_password_change", args=(user.id,))
)
# The auth/user/change_password.html template uses super in the
# bodyclass block.
self.assertContains(response, "bodyclass_consistency_check ")
# When a site has multiple passwords in the browser's password manager,
        # a browser pop-up asks which user the new password is for. To prevent
# this, the username is added to the change password form.
self.assertContains(
response, '<input type="text" name="username" value="super" class="hidden">'
)
        # Help text for passwords has an id.
self.assertContains(
response,
'<div class="help" id="id_password1_helptext"><ul><li>'
"Your password can’t be too similar to your other personal information."
"</li><li>Your password can’t be entirely numeric.</li></ul></div>",
)
self.assertContains(
response,
'<div class="help" id="id_password2_helptext">'
"Enter the same password as before, for verification.</div>",
)
def test_extended_bodyclass_template_index(self):
"""
The admin/index.html template uses block.super in the bodyclass block.
"""
response = self.client.get(reverse("admin:index"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_change_list(self):
"""
        The admin/change_list.html template uses block.super
in the bodyclass block.
"""
response = self.client.get(reverse("admin:admin_views_article_changelist"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_login(self):
"""
The admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get(reverse("admin:login"))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_confirmation(self):
"""
The admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get(reverse("admin:auth_group_delete", args=(group.id,)))
self.assertContains(response, "bodyclass_consistency_check ")
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
The admin/delete_selected_confirmation.html template uses
block.super in bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
"action": "delete_selected",
"selected_across": "0",
"index": "0",
"_selected_action": group.id,
}
response = self.client.post(reverse("admin:auth_group_changelist"), post_data)
self.assertEqual(response.context["site_header"], "Django administration")
self.assertContains(response, "bodyclass_consistency_check ")
def test_filter_with_custom_template(self):
"""
A custom template can be used to render an admin filter.
"""
response = self.client.get(reverse("admin:admin_views_color2_changelist"))
self.assertTemplateUsed(response, "custom_filter_template.html")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_change_form_URL_has_correct_value(self):
"""
change_view has form_url in response.context
"""
response = self.client.get(
reverse(
"admin:admin_views_section_change",
args=(self.s1.pk,),
current_app=self.current_app,
)
)
self.assertIn(
"form_url", response.context, msg="form_url not present in response.context"
)
self.assertEqual(response.context["form_url"], "pony")
def test_initial_data_can_be_overridden(self):
"""
The behavior for setting initial form data can be overridden in the
ModelAdmin class. Usually, the initial value is set via the GET params.
"""
response = self.client.get(
reverse("admin:admin_views_restaurant_add", current_app=self.current_app),
{"name": "test_value"},
)
        # This would be the usual behavior.
        self.assertNotContains(response, 'value="test_value"')
        # This is the overridden behavior.
self.assertContains(response, 'value="overridden_value"')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_js_minified_only_if_debug_is_false(self):
"""
The minified versions of the JS files are only used when DEBUG is False.
"""
with override_settings(DEBUG=False):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertNotContains(response, "vendor/jquery/jquery.js")
self.assertContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
with override_settings(DEBUG=True):
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, "vendor/jquery/jquery.js")
self.assertNotContains(response, "vendor/jquery/jquery.min.js")
self.assertContains(response, "prepopulate.js")
self.assertContains(response, "actions.js")
self.assertContains(response, "collapse.js")
self.assertContains(response, "inlines.js")
@override_settings(ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_as_duplication(self):
"""'save as' creates a new person"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)), post_data
)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
new_person = Person.objects.latest("id")
self.assertRedirects(
response, reverse("admin:admin_views_person_change", args=(new_person.pk,))
)
def test_save_as_continue_false(self):
"""
Saving a new object using "Save as new" redirects to the changelist
instead of the change view when ModelAdmin.save_as_continue=False.
"""
post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
url = reverse(
"admin:admin_views_person_change",
args=(self.per1.pk,),
current_app=site2.name,
)
response = self.client.post(url, post_data)
self.assertEqual(len(Person.objects.filter(name="John M")), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
self.assertRedirects(
response,
reverse("admin:admin_views_person_changelist", current_app=site2.name),
)
def test_save_as_new_with_validation_errors(self):
"""
        When you click "Save as new" and a validation error occurs, only
        the "Save as new" button is displayed; the other save buttons are
        hidden.
"""
response = self.client.post(
reverse("admin:admin_views_person_change", args=(self.per1.pk,)),
{
"_saveasnew": "",
"gender": "invalid",
"_addanother": "fail",
},
)
self.assertContains(response, "Please correct the errors below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "Child",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "_invalid",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name="Father")
child = Child.objects.create(parent=parent, name="Child")
response = self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.pk,)),
{
"_saveasnew": "Save as new",
"child_set-0-parent": parent.pk,
"child_set-0-id": child.pk,
"child_set-0-name": "_invalid",
"child_set-INITIAL_FORMS": 1,
"child_set-MAX_NUM_FORMS": 1000,
"child_set-MIN_NUM_FORMS": 0,
"child_set-TOTAL_FORMS": 4,
"name": "Father",
},
)
self.assertContains(response, "Please correct the error below.")
self.assertFalse(response.context["show_save_and_add_another"])
self.assertFalse(response.context["show_save_and_continue"])
self.assertTrue(response.context["show_save_as_new"])
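    # "Save as new" is enabled by ModelAdmin.save_as; with
    # save_as_continue=False the redirect goes to the changelist instead of
    # the new object's change view. A minimal sketch:
    #
    #     class PersonAdmin(admin.ModelAdmin):
    #         save_as = True
    #         save_as_continue = False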
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post(
reverse("admin2:login"),
{
REDIRECT_FIELD_NAME: reverse("admin2:index"),
"username": "customform",
"password": "secret",
},
follow=True,
)
self.assertIsInstance(login, TemplateResponse)
self.assertContains(login, "custom form error")
self.assertContains(login, "path/to/media.css")
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get(reverse("admin2:index"), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/login.html")
self.assertContains(response, "Hello from a custom login template")
def test_custom_admin_site_logout_template(self):
response = self.client.post(reverse("admin2:logout"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/logout.html")
self.assertContains(response, "Hello from a custom logout template")
def test_custom_admin_site_index_view_and_template(self):
response = self.client.get(reverse("admin2:index"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/index.html")
self.assertContains(response, "Hello from a custom index template *bar*")
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get(reverse("admin2:app_list", args=("admin_views",)))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/app_index.html")
self.assertContains(response, "Hello from a custom app_index template")
def test_custom_admin_site_password_change_template(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(
response, "Hello from a custom password change form template"
)
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get(reverse("admin2:password_change"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_form.html")
self.assertContains(response, "eggs")
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get(reverse("admin2:password_change_done"))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, "custom_admin/password_change_done.html")
self.assertContains(
response, "Hello from a custom password change done template"
)
def test_custom_admin_site_view(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin2:my_view"))
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.force_login(self.superuser)
su = User.objects.get(username="super")
response = self.client.get(
reverse("admin4:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 200)
def get_perm(Model, codename):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model, for_concrete_model=False)
return Permission.objects.get(content_type=ct, codename=codename)
@override_settings(
ROOT_URLCONF="admin_views.urls",
# Test with the admin's documented list of required context processors.
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.nostaffuser = User.objects.create_user(
username="nostaff", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
another_section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
# Setup permissions, for our users who can add, change, and delete.
opts = Article._meta
# User who can view Articles
cls.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", opts))
)
# User who can add Articles
cls.adduser.user_permissions.add(
get_perm(Article, get_permission_codename("add", opts))
)
# User who can change Articles
cls.changeuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
cls.nostaffuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
# User who can delete Articles
cls.deleteuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(Section, get_permission_codename("delete", Section._meta))
)
# login POST dicts
cls.index_url = reverse("admin:index")
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super",
"password": "secret",
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "secret",
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "[email protected]",
"password": "notsecret",
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "adduser",
"password": "secret",
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "changeuser",
"password": "secret",
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "deleteuser",
"password": "secret",
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "nostaff",
"password": "secret",
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "joepublic",
"password": "secret",
}
cls.viewuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "viewuser",
"password": "secret",
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"password": "secret",
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Test the case where the user enters an email address as the username.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
        # Only correct passwords get a username hint.
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username="jondoe", password="secret", email="[email protected]")
new_user.save()
        # Ensure that a duplicate email address doesn't cause a 500 on login.
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# View User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.viewuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
        # Regular User should not be able to log in.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
        # Requests without a username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
self.assertFormError(
login.context["form"], "username", ["This field is required."]
)
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse("admin:login"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
def test_login_has_permission(self):
        # Regular User should not be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.joepublic_login
)
self.assertContains(login, "permission denied")
        # User with permissions should be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.nostaff_login
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
        # Staff should be able to log in.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"),
{
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "deleteuser",
"password": "secret",
},
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = "the-answer=42"
redirect_url = "%s?%s" % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
login = self.client.post(
"%s?%s" % (reverse("admin:login"), urlencode(new_next)), post_data
)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = "You are authenticated as {}"
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertNotContains(response, hint_template.format(""), status_code=200)
# Non-staff user should be shown the hint
self.client.force_login(self.nostaffuser)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertContains(
response, hint_template.format(self.nostaffuser.username), status_code=200
)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.post(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.post(reverse("admin:logout"))
        # Add user may log in and POST to the add view, then is redirected to
        # the admin root.
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
        change_list_link = '&rsaquo; <a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix=(
"User restricted to add permission is given link to change list view "
"in breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.post(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix=(
"Unrestricted user is not given link to change list view in "
"breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.post(reverse("admin:logout"))
        # Refs #8509 -- if a normal user is already logged in, it's possible
        # to switch to the superuser without error.
self.client.force_login(self.joepublicuser)
        # Make sure that if the user's session expires, data still persists.
self.client.force_login(self.superuser)
        # Make sure the view removes the test cookie.
self.assertIs(self.client.session.test_cookie_worked(), False)
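    # `get_perm()` used above is a helper defined earlier in this test module;
    # a minimal sketch of such a helper (an assumption about its body, shown
    # only for orientation) resolves a Permission from a model and a codename:
    #
    #     from django.contrib.auth.models import Permission
    #     from django.contrib.contenttypes.models import ContentType
    #
    #     def get_perm(Model, codename):
    #         """Return the Permission object for the given model/codename."""
    #         ct = ContentType.objects.get_for_model(Model)
    #         return Permission.objects.get(content_type=ct, codename=codename)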
@mock.patch("django.contrib.admin.options.InlineModelAdmin.has_change_permission")
def test_add_view_with_view_only_inlines(self, has_change_permission):
"""User with add permission to a section but view-only for inlines."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("add", Section._meta))
)
self.client.force_login(self.viewuser)
# Valid POST creates a new section.
data = {
"name": "New obj",
"article_set-TOTAL_FORMS": 0,
"article_set-INITIAL_FORMS": 0,
}
response = self.client.post(reverse("admin:admin_views_section_add"), data)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(Section.objects.latest("id").name, data["name"])
# InlineModelAdmin.has_change_permission()'s obj argument is always
# None during object add.
self.assertEqual(
[obj for (request, obj), _ in has_change_permission.call_args_list],
[None, None],
)
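    # A sketch (hypothetical inline, for illustration only) of the hook mocked
    # above; during an add there is no parent object yet, so the admin passes
    # obj=None:
    #
    #     class ArticleInline(admin.TabularInline):
    #         model = Article
    #
    #         def has_change_permission(self, request, obj=None):
    #             # obj is the parent Section instance, or None on add.
    #             return super().has_change_permission(request, obj)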
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
change_dict = {
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
article_changelist_url = reverse("admin:admin_views_article_changelist")
        # The add user should not be able to view the list of articles or
        # change any of them.
self.client.force_login(self.adduser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view articles but not make changes.
self.client.force_login(self.viewuser)
response = self.client.get(article_changelist_url)
self.assertContains(
response,
"<title>Select article to view | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to view</h1>")
self.assertEqual(response.context["title"], "Select article to view")
response = self.client.get(article_change_url)
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<label>Extra form field:</label>")
self.assertContains(
response,
'<a href="/test_admin/admin/admin_views/article/" class="closelink">Close'
"</a>",
)
self.assertEqual(response.context["title"], "View article")
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.context["title"], "Select article to change")
self.assertContains(
response,
"<title>Select article to change | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to change</h1>")
response = self.client.get(article_change_url)
self.assertEqual(response.context["title"], "Change article")
self.assertContains(
response,
"<title>Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
)
        # One error in the form should produce a singular error message;
        # multiple errors, the plural one.
change_dict["title"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the error below.",
msg_prefix=(
"Singular error message not found in response to post with one error"
),
)
change_dict["content"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the errors below.",
msg_prefix=(
"Plural error message not found in response to post with multiple "
"errors"
),
)
self.client.post(reverse("admin:logout"))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
change_url_1 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
)
change_url_2 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
)
change_url_3 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
)
change_url_6 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
)
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertRedirects(response, self.index_url)
response = self.client.get(change_url_3)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_3, {"name": "changed"})
self.assertEqual(response.status_code, 403)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=3).name,
"odd id mult 3",
)
response = self.client.get(change_url_6)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_6, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
)
self.assertRedirects(response, self.index_url)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_1, {"name": "changed"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertContains(response, "login-form")
response = self.client.get(change_url_2, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_2, {"name": "changed again"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
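    # The row-level checks above rely on a ModelAdmin override along these
    # lines (a sketch; the real admin class lives in this test app's admin
    # module):
    #
    #     class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    #         def has_change_permission(self, request, obj=None):
    #             # Only objects with an even id may be changed.
    #             return obj is not None and obj.id % 2 == 0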
def test_change_view_without_object_change_permission(self):
"""
The object should be read-only if the user has permission to view it
and change objects of that type but not to change the current object.
"""
change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
self.client.force_login(self.viewuser)
response = self.client.get(change_url)
self.assertEqual(response.context["title"], "View article")
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(
response,
'<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close'
"</a>",
)
def test_change_view_save_as_new(self):
"""
'Save as new' should raise PermissionDenied for users without the 'add'
permission.
"""
change_dict_save_as_new = {
"_saveasnew": "Save as new",
"title": "Ikke fordømt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
# Add user can perform "Save as new".
article_count = Article.objects.count()
self.client.force_login(self.adduser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), article_count + 1)
self.client.logout()
# Change user cannot perform "Save as new" (no 'add' permission).
article_count = Article.objects.count()
self.client.force_login(self.changeuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), article_count)
# User with both add and change permissions should be redirected to the
# change page for the newly created object.
article_count = Article.objects.count()
self.client.force_login(self.superuser)
post = self.client.post(article_change_url, change_dict_save_as_new)
self.assertEqual(Article.objects.count(), article_count + 1)
new_article = Article.objects.latest("id")
self.assertRedirects(
post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
)
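    # "Save as new" is only offered when the ModelAdmin opts in; a sketch of
    # the relevant option (assuming the Article admin enables it, as the
    # assertions above imply):
    #
    #     class ArticleAdmin(admin.ModelAdmin):
    #         save_as = True  # renders the "Save as new" submit button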
def test_change_view_with_view_only_inlines(self):
"""
User with change permission to a section but view-only for inlines.
"""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# Valid POST changes the name.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 3,
"article_set-INITIAL_FORMS": 3,
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
def test_change_view_with_view_only_last_inline(self):
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("view", Section._meta))
)
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 3)
# The last inline is not marked as empty.
self.assertContains(response, 'id="article_set-2"')
def test_change_view_with_view_and_add_inlines(self):
"""User has view and add permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
self.client.force_login(self.viewuser)
# GET shows inlines.
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
# Valid POST creates a new article.
data = {
"name": "Can edit name with view-only inlines",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-3-id": [""],
"article_set-3-title": ["A title"],
"article_set-3-content": ["Added content"],
"article_set-3-date_0": ["2008-3-18"],
"article_set-3-date_1": ["11:54:58"],
"article_set-3-section": [str(self.s1.pk)],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
self.assertEqual(Article.objects.count(), 4)
# Invalid POST reshows inlines.
del data["name"]
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
formset = response.context["inline_admin_formsets"][0]
self.assertEqual(len(formset.forms), 6)
def test_change_view_with_view_and_delete_inlines(self):
"""User has view and delete permissions on the inline model."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("change", Section._meta))
)
self.client.force_login(self.viewuser)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
# Inline POST details are ignored without delete permission.
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 3)
# Deletion successful when delete permission is added.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", Article._meta))
)
data = {
"name": "Name is required.",
"article_set-TOTAL_FORMS": 6,
"article_set-INITIAL_FORMS": 3,
"article_set-0-id": [str(self.a1.pk)],
"article_set-0-DELETE": ["on"],
}
response = self.client.post(
reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
)
self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
self.assertEqual(Article.objects.count(), 2)
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))
# add user should not be able to delete articles
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# view user should not be able to delete articles
self.client.force_login(self.viewuser)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# Delete user can delete
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
        # The response should contain a link to the related Article.
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
post = self.client.post(delete_url, delete_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, str(self.a1.pk))
def test_delete_view_with_no_default_permissions(self):
"""
The delete view allows users to delete collected objects without a
'delete' permission (ReadOnlyPizza.Meta.default_permissions is empty).
"""
pizza = ReadOnlyPizza.objects.create(name="Double Cheese")
delete_url = reverse("admin:admin_views_readonlypizza_delete", args=(pizza.pk,))
self.client.force_login(self.adduser)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/readonlypizza/%s/" % pizza.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Read only pizzas: 1</li>")
post = self.client.post(delete_url, {"post": "yes"})
self.assertRedirects(
post, reverse("admin:admin_views_readonlypizza_changelist")
)
self.assertEqual(ReadOnlyPizza.objects.count(), 0)
def test_delete_view_nonexistent_obj(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_article_delete", args=("nonexistent",))
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “nonexistent” doesn’t exist. Perhaps it was deleted?"],
)
def test_history_view(self):
"""History view should restrict access."""
        # The add user should not be able to view the list of articles or
        # change any of them.
self.client.force_login(self.adduser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 403)
self.client.post(reverse("admin:logout"))
# view user can view all items
self.client.force_login(self.viewuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=(self.a1.pk,))
)
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.post(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl1.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
url = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_history",
args=(rl2.pk,),
)
response = self.client.get(url, follow=True)
self.assertContains(response, "login-form")
self.client.post(reverse("admin:logout"))
def test_history_view_bad_url(self):
self.client.force_login(self.changeuser)
response = self.client.get(
reverse("admin:admin_views_article_history", args=("foo",)), follow=True
)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["article with ID “foo” doesn’t exist. Perhaps it was deleted?"],
)
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
self.client.force_login(self.adduser)
# The user can't add sections yet, so they shouldn't see the "add section" link.
url = reverse("admin:admin_views_article_add")
add_link_text = "add_id_section"
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the user to add sections too. Now they can see the "add section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("add", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_conditionally_show_change_section_link(self):
"""
The foreign key widget should only show the "change related" button if
the user has permission to change that related item.
"""
def get_change_related(response):
return (
response.context["adminform"]
.form.fields["section"]
.widget.can_change_related
)
self.client.force_login(self.adduser)
# The user can't change sections yet, so they shouldn't see the
# "change section" link.
url = reverse("admin:admin_views_article_add")
change_link_text = "change_id_section"
response = self.client.get(url)
self.assertFalse(get_change_related(response))
self.assertNotContains(response, change_link_text)
# Allow the user to change sections too. Now they can see the
# "change section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("change", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_change_related(response))
self.assertContains(response, change_link_text)
def test_conditionally_show_delete_section_link(self):
"""
The foreign key widget should only show the "delete related" button if
the user has permission to delete that related item.
"""
def get_delete_related(response):
return (
response.context["adminform"]
.form.fields["sub_section"]
.widget.can_delete_related
)
self.client.force_login(self.adduser)
# The user can't delete sections yet, so they shouldn't see the
# "delete section" link.
url = reverse("admin:admin_views_article_add")
delete_link_text = "delete_id_sub_section"
response = self.client.get(url)
self.assertFalse(get_delete_related(response))
self.assertNotContains(response, delete_link_text)
# Allow the user to delete sections too. Now they can see the
# "delete section" link.
user = User.objects.get(username="adduser")
perm = get_perm(Section, get_permission_codename("delete", Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_delete_related(response))
self.assertContains(response, delete_link_text)
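    # The three "conditionally show" tests above read capability flags from
    # RelatedFieldWidgetWrapper (django.contrib.admin.widgets); a sketch of
    # inspecting them on a rendered form:
    #
    #     widget = response.context["adminform"].form.fields["section"].widget
    #     widget.can_add_related     # "+" (add related) button
    #     widget.can_change_related  # pencil (change related) button
    #     widget.can_delete_related  # "x" (delete related) button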
def test_disabled_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_active = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.force_login(self.superuser)
superuser = User.objects.get(username="super")
superuser.is_staff = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, "Log out")
response = self.client.get(reverse("secure_view"), follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_list_permissions(self):
"""
If a user has no module perms, the app list returns a 404.
"""
opts = Article._meta
change_user = User.objects.get(username="changeuser")
permission = get_perm(Article, get_permission_codename("change", opts))
self.client.force_login(self.changeuser)
# the user has no module permissions
change_user.user_permissions.remove(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 404)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk="foo")
shortcut_url = reverse("admin:view_on_site", args=(model_ctype.pk, obj.pk))
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
# Logged in? Redirect.
self.client.force_login(self.superuser)
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
        # The domain may vary depending on whether the contrib.sites tests
        # also ran.
self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/")
def test_has_module_permission(self):
"""
        has_module_permission() returns True for all users who have any
        permission for that module (view, add, change, or delete), so that
        the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
def test_overriding_has_module_permission(self):
"""
If has_module_permission() always returns False, the module shouldn't
be displayed on the admin index page for any users.
"""
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse("admin7:index")
self.client.force_login(self.superuser)
response = self.client.get(index_url)
self.assertContains(response, sections)
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(index_url)
self.assertNotContains(response, "admin_views")
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(index_url)
self.assertNotContains(response, articles)
# The app list displays Sections but not Articles as the latter has
# ModelAdmin.has_module_permission() = False.
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
self.assertContains(response, sections)
self.assertNotContains(response, articles)
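    # A sketch of the kind of override behind the admin7 site exercised above
    # (hypothetical class name; the real registration lives in this test
    # app's admin module):
    #
    #     class ArticleAdminNoModulePerm(admin.ModelAdmin):
    #         def has_module_permission(self, request):
    #             return False  # keeps Articles off the index and app list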
def test_post_save_message_no_forbidden_links_visible(self):
"""
Post-save message shouldn't contain a link to the change form if the
user doesn't have the change permission.
"""
self.client.force_login(self.adduser)
# Emulate Article creation for user with add-only permission.
post_data = {
"title": "Fun & games",
"content": "Some content",
"date_0": "2015-10-31",
"date_1": "16:35:00",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_article_add"), post_data, follow=True
)
self.assertContains(
response,
'<li class="success">The article “Fun & games” was added successfully.'
"</li>",
html=True,
)
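# Proxy models carry their own permission rows, distinct from those of the
# concrete model; a minimal sketch of such a proxy (the UserProxy model used
# below is defined in this test app's models):
#
#     class UserProxy(User):
#         class Meta:
#             proxy = True  # separate add/change/delete/view permissions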
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class AdminViewProxyModelPermissionsTests(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
# Setup permissions.
opts = UserProxy._meta
cls.viewuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("view", opts))
)
cls.adduser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("add", opts))
)
cls.changeuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("change", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("delete", opts))
)
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(
username="user_proxy", password="secret"
)
def test_add(self):
self.client.force_login(self.adduser)
url = reverse("admin:admin_views_userproxy_add")
data = {
"username": "can_add",
"password": "secret",
"date_joined_0": "2019-01-15",
"date_joined_1": "16:59:10",
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username="can_add").exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_userproxy_changelist"))
self.assertContains(response, "<h1>Select user proxy to view</h1>")
response = self.client.get(
reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
)
self.assertContains(response, "<h1>View user proxy</h1>")
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
"password": self.user_proxy.password,
"username": self.user_proxy.username,
"date_joined_0": self.user_proxy.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": self.user_proxy.date_joined.strftime("%H:%M:%S"),
"first_name": "first_name",
}
url = reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(
response, reverse("admin:admin_views_userproxy_changelist")
)
self.assertEqual(
UserProxy.objects.get(pk=self.user_proxy.pk).first_name, "first_name"
)
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_userproxy_delete", args=(self.user_proxy.pk,))
response = self.client.post(url, {"post": "yes"}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
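# The regression below involves a ModelAdmin that drops its standard URLs; a
# sketch of such an admin (an assumption about its shape, exposing only a
# single custom view):
#
#     from django.http import HttpResponse
#     from django.urls import path
#
#     class ReportAdmin(admin.ModelAdmin):
#         def extra(self, request):
#             return HttpResponse()
#
#         def get_urls(self):
#             # Deliberately don't call super().get_urls().
#             return [path("extra/", self.extra, name="cable_extra")]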
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.changeuser.user_permissions.add(
get_perm(Report, get_permission_codename("change", Report._meta))
)
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse("admin:index"))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.post(reverse("admin:logout"))
@skipUnlessDBFeature("can_defer_constraint_checks")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.v2 = Villain.objects.create(name="Sue")
cls.sv1 = SuperVillain.objects.create(name="Bob")
cls.pl1 = Plot.objects.create(
name="World Domination", team_leader=cls.v1, contact=cls.v2
)
cls.pl2 = Plot.objects.create(
name="World Peace", team_leader=cls.v2, contact=cls.v2
)
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
cls.pd1 = PlotDetails.objects.create(details="almost finished", plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(
location="underground bunker", villain=cls.v1
)
cls.sh2 = SecretHideout.objects.create(
location="floating castle", villain=cls.sv1
)
cls.ssh1 = SuperSecretHideout.objects.create(
location="super floating castle!", supervillain=cls.sv1
)
cls.cy1 = CyclicOne.objects.create(name="I am recursive", two_id=1)
cls.cy2 = CyclicTwo.objects.create(name="I am recursive too", one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>'
% (
reverse("admin:admin_views_plot_change", args=(self.pl1.pk,)),
reverse("admin:admin_views_plotdetails_change", args=(self.pd1.pk,)),
)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertRegex(response.content.decode(), pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
)
response = self.client.get(
reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
)
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username="deleteuser")
delete_user.user_permissions.add(
get_perm(Plot, get_permission_codename("delete", Plot._meta))
)
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(self.pl1.pk,))
)
self.assertContains(
response,
"your account doesn't have permission to delete the following types of "
"objects",
)
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(
reverse("admin:admin_views_question_delete", args=(q.pk,))
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question="Why?")
Answer.objects.create(question=q, answer="Because.")
response = self.client.post(
reverse("admin:admin_views_question_delete", args=(q.pk,)), {"post": "yes"}
)
self.assertEqual(Question.objects.count(), 1)
self.assertContains(
response, "would require deleting the following protected related objects"
)
def test_restricted(self):
album = Album.objects.create(title="Amaryllis")
song = Song.objects.create(album=album, name="Unity")
response = self.client.get(
reverse("admin:admin_views_album_delete", args=(album.pk,))
)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse("admin:admin_views_song_change", args=(song.pk,)),
)
def test_post_delete_restricted(self):
album = Album.objects.create(title="Amaryllis")
Song.objects.create(album=album, name="Unity")
response = self.client.post(
reverse("admin:admin_views_album_delete", args=(album.pk,)),
{"post": "yes"},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
"admin:admin_views_plot_change", args=(self.pl1.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
"admin:admin_views_plot_change", args=(self.pl2.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)),
"<li>Secret hideout: floating castle",
"<li>Super secret hideout: super floating castle!",
]
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(
reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name="hott")
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
"admin:admin_views_funkytag_change", args=(tag.id,)
)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(plot.pk,))
)
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has GenericForeignKey with
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name="djangoproject")
tag = FunkyTag.objects.create(content_object=bookmark, name="django")
tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(
reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,))
)
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
response = self.client.get(
reverse("admin2:admin_views_book_delete", args=(book.pk,))
)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name="hott")
response = self.client.get(reverse("admin:admin_views_funkytag_changelist"))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.pk = (
"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "
r"""-_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
)
cls.m1 = ModelWithStringPrimaryKey.objects.create(string_pk=cls.pk)
content_type_pk = ContentType.objects.get_for_model(
ModelWithStringPrimaryKey
).pk
user_pk = cls.superuser.pk
LogEntry.objects.log_action(
user_pk,
content_type_pk,
cls.pk,
cls.pk,
2,
change_message="Changed something",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_get_history_view(self):
"""
        Retrieving the history for an object using the urlencoded form of the
        primary key should work.
Refs #12349, #18550.
"""
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_history", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
self.assertContains(response, "Changed something")
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(self.pk,)
)
)
self.assertContains(response, escape(self.pk))
def test_changelist_to_changeform_link(self):
"""
        The link to the changeform of the object in the changelist should use
        reverse() and be quoted.
"""
response = self.client.get(
reverse("admin:admin_views_modelwithstringprimarykey_changelist")
)
        # This URL now comes through reverse(), so it's URL-quoted and
        # iri_to_uri()-encoded.
pk_final_url = escape(iri_to_uri(quote(self.pk)))
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", pk_final_url)
should_contain = '<th class="field-__str__"><a href="%s">%s</a></th>' % (
change_url,
escape(self.pk),
)
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"""
The link from the recent actions list referring to the changeform of
the object should be quoted.
"""
response = self.client.get(reverse("admin:index"))
link = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=(quote(self.pk),)
)
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_deleteconfirmation_link(self):
""" "
The link from the delete confirmation page referring back to the
changeform of the object should be quoted.
"""
url = reverse(
"admin:admin_views_modelwithstringprimarykey_delete", args=(quote(self.pk),)
)
response = self.client.get(url)
        # This URL now comes through reverse(), so it's URL-quoted and
        # iri_to_uri()-encoded.
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",)
).replace("__fk__", escape(iri_to_uri(quote(self.pk))))
should_contain = '<a href="%s">%s</a>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add or is `add` should be visible"
        add_model = ModelWithStringPrimaryKey.objects.create(
            pk="i have something to add"
        )
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
add_model2 = ModelWithStringPrimaryKey.objects.create(pk="add")
add_url = reverse("admin:admin_views_modelwithstringprimarykey_add")
change_url = reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(add_model2.pk),),
)
self.assertNotEqual(add_url, change_url)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(delete_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(history_model.pk),),
)
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk="abc_123")
model.save()
response = self.client.get(
reverse(
"admin:admin_views_modelwithstringprimarykey_change",
args=(quote(model.pk),),
)
)
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse(
"admin:%s_modelwithstringprimarykey_change"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse(
"admin:%s_modelwithstringprimarykey_history"
% ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),),
)
self.assertContains(
response, '<a href="%s" class="historylink"' % escape(expected_link)
)
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
reverse("admin:admin_views_modelwithstringprimarykey_add"),
{
"string_pk": "123/history",
"_continue": "1", # Save and continue editing
},
)
self.assertEqual(response.status_code, 302) # temporary redirect
self.assertIn("/123_2Fhistory/", response.headers["location"]) # PK is quoted
@override_settings(ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
def test_secure_view_shows_login_if_not_logged_in(self):
secure_url = reverse("secure_view")
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), secure_url)
)
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
        The staff_member_required decorator works with an argument
        (redirect_field_name).
"""
secure_url = "/test_admin/admin/secure-view2/"
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?myfield=%s" % (reverse("admin:login"), secure_url)
)
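# staff_member_required accepts a custom redirect field name, which the
# second test above relies on; a sketch of both forms (view names here are
# placeholders):
#
#     from django.contrib.admin.views.decorators import staff_member_required
#     from django.http import HttpResponse
#
#     @staff_member_required
#     def secure_view(request):
#         return HttpResponse("ok")
#
#     @staff_member_required(redirect_field_name="myfield")
#     def secure_view2(request):
#         return HttpResponse("ok")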
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.b1 = Book.objects.create(name="Lærdommer")
cls.p1 = Promo.objects.create(name="<Promo for Lærdommer>", book=cls.b1)
cls.chap1 = Chapter.objects.create(
title="Norske bostaver æøå skaper problemer",
content="<p>Svært frustrerende med UnicodeDecodeErro</p>",
book=cls.b1,
)
cls.chap2 = Chapter.objects.create(
title="Kjærlighet",
content="<p>La kjærligheten til de lidende seire.</p>",
book=cls.b1,
)
cls.chap3 = Chapter.objects.create(
title="Kjærlighet", content="<p>Noe innhold</p>", book=cls.b1
)
cls.chap4 = ChapterXtra1.objects.create(
chap=cls.chap1, xtra="<Xtra(1) Norske bostaver æøå skaper problemer>"
)
cls.chap5 = ChapterXtra1.objects.create(
chap=cls.chap2, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap6 = ChapterXtra1.objects.create(
chap=cls.chap3, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap7 = ChapterXtra2.objects.create(
chap=cls.chap1, xtra="<Xtra(2) Norske bostaver æøå skaper problemer>"
)
cls.chap8 = ChapterXtra2.objects.create(
chap=cls.chap2, xtra="<Xtra(2) Kjærlighet>"
)
cls.chap9 = ChapterXtra2.objects.create(
chap=cls.chap3, xtra="<Xtra(2) Kjærlighet>"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_unicode_edit(self):
"""
A test to ensure that POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": (
"<p>Svært frustrerende med UnicodeDecodeError</p>"
),
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": (
"<p>La kjærligheten til de lidende seire.</p>"
),
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(
reverse("admin:admin_views_book_change", args=(self.b1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
The delete_view handles non-ASCII characters
"""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_book_delete", args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse("admin:admin_views_book_changelist"))
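# The list_editable POSTs in the class below drive a changelist formset; the
# "form-*" keys are the standard formset management-form fields:
#
#     "form-TOTAL_FORMS"    # number of forms submitted
#     "form-INITIAL_FORMS"  # number of forms bound to existing objects
#     "form-MAX_NUM_FORMS"  # cap on the number of extra forms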
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_inheritance(self):
Podcast.objects.create(
name="This Week in Django", release_date=datetime.date.today()
)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"))
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso="en", name="English", english_name="English")
response = self.client.get(reverse("admin:admin_views_language_changelist"))
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get(reverse("admin:admin_views_person_changelist"))
        # 2 inputs per object (the field and the hidden id field) = 6
        # 4 management hidden fields = 4
        # 4 action inputs (3 regular checkboxes, 1 checkbox to select all)
        # main form submit button = 1
        # search field and search submit button = 2
        # CSRF field = 2
        # field to track 'select all' across paginated views = 1
        # nav-sidebar filter = 1
        # 6 + 4 + 4 + 1 + 2 + 2 + 1 + 1 = 21 inputs
        self.assertContains(response, "<input", count=21)
        # 1 select per object = 3 selects, plus the action dropdown = 4
        self.assertContains(response, "<select", count=4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data, follow=True
)
self.assertEqual(len(response.context["messages"]), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": str(self.per3.pk),
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?gender__exact=1", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per1.pk),
"form-0-gender": "1",
"_save": "Save",
}
self.client.post(
reverse("admin:admin_views_person_changelist") + "?q=john", data
)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
"""
Non-field errors are displayed for each of the forms in the
changelist's formset.
"""
fd1 = FoodDelivery.objects.create(
reference="123", driver="bill", restaurant="thai"
)
fd2 = FoodDelivery.objects.create(
reference="456", driver="bill", restaurant="india"
)
fd3 = FoodDelivery.objects.create(
reference="789", driver="bill", restaurant="pizza"
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
1,
html=True,
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_fooddelivery_changelist"), data
)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
"with this Driver and Restaurant already exists.</li></ul></td></tr>",
2,
html=True,
)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(self.per2.pk),
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_person_changelist"), data
)
non_form_errors = response.context["cl"].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
self.assertEqual(
str(non_form_errors),
str(ErrorList(["Grace is not a Zombie"], error_class="nonform")),
)
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# The form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_category_changelist"), data
)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Pagination works for list_editable items.
"""
UnorderedObject.objects.create(id=1, name="Unordered object #1")
UnorderedObject.objects.create(id=2, name="Unordered object #2")
UnorderedObject.objects.create(id=3, name="Unordered object #3")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist")
)
self.assertContains(response, "Unordered object #3")
self.assertContains(response, "Unordered object #2")
self.assertNotContains(response, "Unordered object #1")
response = self.client.get(
reverse("admin:admin_views_unorderedobject_changelist") + "?p=2"
)
self.assertNotContains(response, "Unordered object #3")
self.assertNotContains(response, "Unordered object #2")
self.assertContains(response, "Unordered object #1")
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ["3"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": str(self.per1.pk),
"form-1-gender": "2",
"form-1-id": str(self.per2.pk),
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": str(self.per3.pk),
"_save": "Save",
"_selected_action": ["1"],
"action": ["", "delete_selected"],
}
self.client.post(reverse("admin:admin_views_person_changelist"), data)
self.assertIs(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get(reverse("admin:admin_views_person_changelist"))
self.assertNotEqual(response.context["cl"].list_editable, ())
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?%s" % IS_POPUP_VAR
)
self.assertEqual(response.context["cl"].list_editable, ())
def test_pk_hidden_fields(self):
"""
        Hidden pk fields aren't displayed in the table body; the corresponding
        human-readable values are shown instead. The hidden pk fields are
        rendered separately (outside the table) and only once.
"""
story1 = Story.objects.create(
title="The adventures of Guido", content="Once upon a time in Djangoland..."
)
story2 = Story.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
response = self.client.get(reverse("admin:admin_views_story_changelist"))
# Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
"""Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(
title="The adventures of Guido",
content="Once upon a time in Djangoland...",
)
story2 = OtherStory.objects.create(
title="Crouching Tiger, Hidden Python",
content="The Python was sneaking into...",
)
link1 = reverse("admin:admin_views_otherstory_change", args=(story1.pk,))
link2 = reverse("admin:admin_views_otherstory_change", args=(story2.pk,))
response = self.client.get(reverse("admin:admin_views_otherstory_changelist"))
# Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-0-id"', 1)
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(
response,
'<div class="hiddenfields">\n'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n'
"</div>" % (story2.id, story1.id),
html=True,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id),
1,
)
self.assertContains(
response,
'<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id),
1,
)
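# The "Grace is not a Zombie" messages above are non-form errors: they are
# raised by the changelist formset's clean(), not by any individual form,
# which is why they surface via formset.non_form_errors() with the "nonform"
# error class. A minimal sketch of the technique, using hypothetical class
# names (the suite's real PersonAdmin defines its own formset in admin.py):
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms.models import BaseModelFormSet
class SketchPersonFormSet(BaseModelFormSet):
    def clean(self):
        super().clean()
        # Raising here yields a non-form error for the whole formset.
        raise ValidationError("Grace is not a Zombie")
class SketchPersonAdmin(admin.ModelAdmin):
    list_display = ["name", "gender", "alive"]
    list_editable = ["gender", "alive"]
    def get_changelist_formset(self, request, **kwargs):
        kwargs["formset"] = SketchPersonFormSet
        return super().get_changelist_formset(request, **kwargs)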
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="John Doe", gender=1)
Person.objects.create(name='John O"Hara', gender=1)
Person.objects.create(name="John O'Hara", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text="Bar")
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text="Foo")
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text="Few")
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text="Bas")
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(
reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR
)
self.assertContains(response, "\n1 user\n")
self.assertContains(
response,
'<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR,
html=True,
)
def test_exact_matches(self):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=ba"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=uido"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=Bob"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=20"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">6 total</a>)</span>""",
html=True,
)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True,
)
self.assertTrue(response.context["cl"].show_admin_actions)
def test_search_with_spaces(self):
url = reverse("admin:admin_views_person_changelist") + "?q=%s"
tests = [
('"John Doe"', 1),
("'John Doe'", 1),
("John Doe", 0),
('"John Doe" John', 1),
("'John Doe' John", 1),
("John Doe John", 0),
('"John Do"', 1),
("'John Do'", 1),
("'John O'Hara'", 0),
("'John O\\'Hara'", 1),
('"John O"Hara"', 0),
('"John O\\"Hara"', 1),
]
for search, hits in tests:
with self.subTest(search=search):
response = self.client.get(url % search)
self.assertContains(response, "\n%s person" % hits)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_inline(self):
"""
Inline models which inherit from a common parent are correctly handled.
"""
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
# test the add case
response = self.client.get(reverse("admin:admin_views_persona_add"))
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
        # post the add form data
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse("admin:admin_views_persona_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(
reverse("admin:admin_views_persona_change", args=(persona_id,))
)
names = name_re.findall(response.content)
names.remove(b"csrfmiddlewaretoken")
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post(
reverse("admin:admin_views_persona_change", args=(persona_id,)), post_data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
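# The "accounts-0-..." and "accounts-2-0-..." keys above are not typos: both
# inline models inherit from a common parent whose FK accessor is "accounts",
# so both formsets want the same default prefix and the ModelAdmin
# disambiguates the second one by appending a counter ("accounts-2"). A
# minimal sketch of such a setup, with illustrative class names (the suite's
# real inline classes live in admin.py):
from django.contrib import admin
class FooAccountSketchInline(admin.StackedInline):
    model = FooAccount
    extra = 1
class BarAccountSketchInline(admin.StackedInline):
    model = BarAccount
    extra = 1
class SketchPersonaAdmin(admin.ModelAdmin):
    # Both inlines default to the "accounts" prefix; the second becomes
    # "accounts-2" when the formsets are instantiated.
    inlines = [FooAccountSketchInline, BarAccountSketchInline]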
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post(reverse("admin:admin_views_gadget_add"), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
        # Hit the page once to flush messages out of the message queue.
        response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
        # The data is still not visible on the page.
response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
self.assertNotContains(response, "First Gadget")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_GET_parent_add(self):
"""
        The add view of a parent model renders even when its inline isn't
        editable.
"""
response = self.client.get(reverse("admin:admin_views_parent_add"))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.pks = [EmptyModel.objects.create().id for i in range(3)]
def setUp(self):
self.client.force_login(self.superuser)
self.super_login = {
REDIRECT_FIELD_NAME: reverse("admin:index"),
"username": "super",
"password": "secret",
}
def test_changelist_view(self):
response = self.client.get(reverse("admin:admin_views_emptymodel_changelist"))
for i in self.pks:
if i > 1:
self.assertContains(response, "Primary key = %s" % i)
else:
self.assertNotContains(response, "Primary key = %s" % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name="person1", gender=1)
Person.objects.create(name="person2", gender=2)
changelist_url = reverse("admin:admin_views_person_changelist")
# 5 queries are expected: 1 for the session, 1 for the user,
# 2 for the counts and 1 for the objects on the page
with self.assertNumQueries(5):
resp = self.client.get(changelist_url)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"q": "not_in_name"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 0 selected")
self.assertEqual(resp.context["selection_note_all"], "All 0 selected")
with self.assertNumQueries(5):
extra = {"q": "person"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 2 selected")
self.assertEqual(resp.context["selection_note_all"], "All 2 selected")
with self.assertNumQueries(5):
extra = {"gender__exact": "1"}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context["selection_note"], "0 of 1 selected")
self.assertEqual(resp.context["selection_note_all"], "1 selected")
def test_change_view(self):
for i in self.pks:
url = reverse("admin:admin_views_emptymodel_change", args=(i,))
response = self.client.get(url, follow=True)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(
[m.message for m in response.context["messages"]],
["empty model with ID “1” doesn’t exist. Perhaps it was deleted?"],
)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_coverletter_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = CoverLetter.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"Candidate, Best</a>” was added successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_shortmessage_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
sm = ShortMessage.objects.all()[0]
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
pk = Telegram.objects.all()[0].pk
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Urgent telegram</a>” was added successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(pk,)),
html=True,
)
# model has no __str__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_add"), post_data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
p = Paper.objects.all()[0]
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was added successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __str__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
url = reverse("admin:admin_views_coverletter_change", args=(cl.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The cover letter “<a href="%s">'
"John Doe II</a>” was changed successfully.</li>"
% reverse("admin:admin_views_coverletter_change", args=(cl.pk,)),
html=True,
)
# model has no __str__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
url = reverse("admin:admin_views_shortmessage_change", args=(sm.pk,))
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The short message “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_shortmessage_change", args=(sm.pk,)), sm),
html=True,
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __str__ method
t = Telegram.objects.create(title="First Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_telegram_change", args=(t.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_telegram_change", args=(t.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by model's __str__()
self.assertContains(
response,
'<li class="success">The telegram “<a href="%s">'
"Telegram without typo</a>” was changed successfully.</li>"
% reverse("admin:admin_views_telegram_change", args=(t.pk,)),
html=True,
)
# model has no __str__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get(
reverse("admin:admin_views_paper_change", args=(p.pk,))
)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_views_paper_change", args=(p.pk,)),
post_data,
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by __str__().
self.assertContains(
response,
'<li class="success">The paper “<a href="%s">'
"%s</a>” was changed successfully.</li>"
% (reverse("admin:admin_views_paper_change", args=(p.pk,)), p),
html=True,
)
def test_history_view_custom_qs(self):
"""
Custom querysets are considered for the admin history view.
"""
self.client.post(reverse("admin:login"), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get(
reverse("admin:admin_views_filteredmanager_changelist")
)
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(1,))
).status_code,
200,
)
self.assertEqual(
self.client.get(
reverse("admin:admin_views_filteredmanager_history", args=(2,))
).status_code,
200,
)
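# The *_defer_qs and *_only_qs tests above cover #14529: every admin view
# must keep working when ModelAdmin.get_queryset() returns querysets with
# deferred fields. A minimal sketch of the two variants; class names are
# illustrative and the deferred field names assume the models used above:
from django.contrib import admin
class SketchDeferAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        # "author" is loaded lazily, on first attribute access.
        return super().get_queryset(request).defer("author")
class SketchOnlyAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        # Only the primary key and "title" are loaded eagerly.
        return super().get_queryset(request).only("title")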
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
file1.write(b"a" * (2**21))
filename = file1.name
file1.close()
cls.gallery = Gallery.objects.create(name="Test Gallery")
cls.picture = Picture.objects.create(
name="Test Picture",
image=filename,
gallery=cls.gallery,
)
def setUp(self):
self.client.force_login(self.superuser)
def test_form_has_multipart_enctype(self):
response = self.client.get(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,))
)
self.assertIs(response.context["has_file_field"], True)
self.assertContains(response, MULTIPART_ENCTYPE)
def test_inline_file_upload_edit_validation_error_post(self):
"""
Inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": str(self.picture.id),
"pictures-0-gallery": str(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post(
reverse("admin:admin_views_gallery_change", args=(self.gallery.id,)),
post_data,
)
self.assertContains(response, b"Currently")
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.collector = Collector.objects.create(pk=1, name="John Fowles")
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
self.client.force_login(self.superuser)
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data["widget_set-0-name"] = "Widget 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# No file or image fields, no enctype on the forms
self.assertIs(response.context["has_file_field"], False)
self.assertNotContains(response, MULTIPART_ENCTYPE)
# Now resave that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"""
A model with an explicit autofield primary key can be saved as inlines.
"""
# First add a new inline
self.post_data["grommet_set-0-name"] = "Grommet 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""
        An inline with an editable ordering field is updated correctly.
"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update(
{
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
}
)
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
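# test_ordered_inline can reorder rows because the Category inline renders
# its "order" integer field as an editable input on every row. A minimal
# sketch of such an inline, with an illustrative class name (the suite's
# real CollectorAdmin configuration lives in admin.py):
from django.contrib import admin
class CategorySketchInline(admin.TabularInline):
    model = Category
    extra = 3  # three blank trailing rows, as in the POST data above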
@override_settings(ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
def setUp(self):
self.client.force_login(self.superuser)
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse("admin:admin_views_fabric_add"))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(
reverse("admin:admin_views_section_change", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(
reverse("admin:admin_views_section_history", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse("admin:index"))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.post(reverse("admin:logout"))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse("admin:password_change"))
self.assertIsNone(get_max_age(response))
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse("admin:password_change_done"))
self.assertIsNone(get_max_age(response))
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse("admin:jsi18n"))
self.assertIsNone(get_max_age(response))
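# get_max_age() (from django.utils.cache) parses the Cache-Control header:
# it returns the max-age value as an int, or None when no max-age directive
# is present. Admin views reached through AdminSite.admin_view() are wrapped
# in never_cache, hence the max-age of 0 asserted above. A minimal
# self-contained illustration of the helper:
from django.http import HttpResponse
from django.utils.cache import get_max_age, patch_cache_control
def _sketch_max_age_behavior():
    response = HttpResponse()
    assert get_max_age(response) is None  # no Cache-Control header yet
    patch_cache_control(response, max_age=0)
    assert get_max_age(response) == 0  # Cache-Control: max-age=0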
@override_settings(ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_prepopulated_on(self):
response = self.client.get(reverse("admin:admin_views_prepopulatedpost_add"))
self.assertContains(response, ""id": "#id_slug"")
self.assertContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
def test_prepopulated_off(self):
response = self.client.get(
reverse("admin:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, ""id": "#id_slug"")
self.assertNotContains(
response, ""dependency_ids": ["#id_title"]"
)
self.assertNotContains(
response,
""id": "#id_prepopulatedsubpost_set-0-subslug"",
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get(
reverse("admin:admin_views_prepopulatedpostlargeslug_add")
)
self.assertContains(response, ""maxLength": 1000") # instead of 1,000
def test_view_only_add_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'
which is present in the add view, even if the
ModelAdmin.has_change_permission() returns False.
"""
response = self.client.get(reverse("admin7:admin_views_prepopulatedpost_add"))
self.assertContains(response, "data-prepopulated-fields=")
self.assertContains(response, ""id": "#id_slug"")
def test_view_only_change_form(self):
"""
PrePopulatedPostReadOnlyAdmin.prepopulated_fields includes 'slug'. That
doesn't break a view-only change view.
"""
response = self.client.get(
reverse("admin7:admin_views_prepopulatedpost_change", args=(self.p1.pk,))
)
self.assertContains(response, 'data-prepopulated-fields="[]"')
self.assertContains(response, '<div class="readonly">%s</div>' % self.p1.slug)
@override_settings(ROOT_URLCONF="admin_views.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
self.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def test_login_button_centered(self):
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
button = self.selenium.find_element(By.CSS_SELECTOR, ".submit-row input")
offset_left = button.get_property("offsetLeft")
offset_right = button.get_property("offsetParent").get_property(
"offsetWidth"
) - (offset_left + button.get_property("offsetWidth"))
# Use assertAlmostEqual to avoid pixel rounding errors.
self.assertAlmostEqual(offset_left, offset_right, delta=3)
def test_prepopulated_fields(self):
"""
The JavaScript-automated prepopulated fields work with the main form
and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_mainprepopulated_add")
)
self.wait_for(".select2")
# Main form ----------------------------------------------------------
self.selenium.find_element(By.ID, "id_pubdate").send_keys("2012-02-18")
self.select_option("#id_status", "option two")
self.selenium.find_element(By.ID, "id_name").send_keys(
" the mAin nÀMë and it's awεšomeıııİ"
)
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
slug3 = self.selenium.find_element(By.ID, "id_slug3").get_attribute("value")
self.assertEqual(slug1, "the-main-name-and-its-awesomeiiii-2012-02-18")
self.assertEqual(slug2, "option-two-the-main-name-and-its-awesomeiiii")
self.assertEqual(
slug3, "the-main-n\xe0m\xeb-and-its-aw\u03b5\u0161ome\u0131\u0131\u0131i"
)
# Stacked inlines with fieldsets -------------------------------------
# Initial inline
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-pubdate"
).send_keys("2011-12-17")
self.select_option("#id_relatedprepopulated_set-0-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-name"
).send_keys(" here is a sŤāÇkeð inline ! ")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "here-is-a-stacked-inline-2011-12-17")
self.assertEqual(slug2, "option-one-here-is-a-stacked-inline")
initial_select2_inputs = self.selenium.find_elements(
By.CLASS_NAME, "select2-selection"
)
# Inline formsets have empty/invisible forms.
# Only the 4 visible select2 inputs are initialized.
num_initial_select2_inputs = len(initial_select2_inputs)
self.assertEqual(num_initial_select2_inputs, 4)
# Add an inline
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
0
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 2,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-pubdate"
).send_keys("1999-01-25")
self.select_option("#id_relatedprepopulated_set-1-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-name"
).send_keys(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog "
"text... "
)
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-1-slug2"
).get_attribute("value")
# 50 characters maximum for slug1 field
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
# 60 characters maximum for slug2 field
self.assertEqual(
slug2, "option-two-now-you-have-another-stacked-inline-with-a-very-l"
)
# Tabular inlines ----------------------------------------------------
# Initial inline
element = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-status"
)
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-pubdate"
).send_keys("1234-12-07")
self.select_option("#id_relatedprepopulated_set-2-0-status", "option two")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-name"
).send_keys("And now, with a tÃbűlaŘ inline !!!")
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-0-slug2"
).get_attribute("value")
self.assertEqual(slug1, "and-now-with-a-tabular-inline-1234-12-07")
self.assertEqual(slug2, "option-two-and-now-with-a-tabular-inline")
# Add an inline
# Button may be outside the browser frame.
element = self.selenium.find_elements(
By.LINK_TEXT, "Add another Related prepopulated"
)[1]
self.selenium.execute_script("window.scrollTo(0, %s);" % element.location["y"])
element.click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 4,
)
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-pubdate"
).send_keys("1981-08-22")
self.select_option("#id_relatedprepopulated_set-2-1-status", "option one")
self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-name"
).send_keys(r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug1"
).get_attribute("value")
slug2 = self.selenium.find_element(
By.ID, "id_relatedprepopulated_set-2-1-slug2"
).get_attribute("value")
self.assertEqual(slug1, "tabular-inline-with-ignored-characters-1981-08-22")
self.assertEqual(slug2, "option-one-tabular-inline-with-ignored-characters")
# Add an inline without an initial inline.
# The button is outside of the browser frame.
self.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.selenium.find_elements(By.LINK_TEXT, "Add another Related prepopulated")[
2
].click()
self.assertEqual(
len(self.selenium.find_elements(By.CLASS_NAME, "select2-selection")),
num_initial_select2_inputs + 6,
)
# Stacked Inlines without fieldsets ----------------------------------
# Initial inline.
row_id = "id_relatedprepopulated_set-4-0-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("2011-12-12")
self.select_option(f"#{row_id}status", "option one")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" sŤāÇkeð inline ! "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "stacked-inline-2011-12-12")
self.assertEqual(slug2, "option-one")
# Add inline.
self.selenium.find_elements(
By.LINK_TEXT,
"Add another Related prepopulated",
)[3].click()
row_id = "id_relatedprepopulated_set-4-1-"
self.selenium.find_element(By.ID, f"{row_id}pubdate").send_keys("1999-01-20")
self.select_option(f"#{row_id}status", "option two")
self.selenium.find_element(By.ID, f"{row_id}name").send_keys(
" now you haVe anöther sŤāÇkeð inline with a very loooong "
)
slug1 = self.selenium.find_element(By.ID, f"{row_id}slug1").get_attribute(
"value"
)
slug2 = self.selenium.find_element(By.ID, f"{row_id}slug2").get_attribute(
"value"
)
self.assertEqual(slug1, "now-you-have-another-stacked-inline-with-a-very-lo")
self.assertEqual(slug2, "option-two")
# Save and check that everything is properly stored in the database
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.assertEqual(MainPrepopulated.objects.count(), 1)
MainPrepopulated.objects.get(
name=" the mAin nÀMë and it's awεšomeıııİ",
pubdate="2012-02-18",
status="option two",
slug1="the-main-name-and-its-awesomeiiii-2012-02-18",
slug2="option-two-the-main-name-and-its-awesomeiiii",
slug3="the-main-nàmë-and-its-awεšomeıııi",
)
self.assertEqual(RelatedPrepopulated.objects.count(), 6)
RelatedPrepopulated.objects.get(
name=" here is a sŤāÇkeð inline ! ",
pubdate="2011-12-17",
status="option one",
slug1="here-is-a-stacked-inline-2011-12-17",
slug2="option-one-here-is-a-stacked-inline",
)
RelatedPrepopulated.objects.get(
# 75 characters in name field
name=(
" now you haVe anöther sŤāÇkeð inline with a very ... "
"loooooooooooooooooo"
),
pubdate="1999-01-25",
status="option two",
slug1="now-you-have-another-stacked-inline-with-a-very-lo",
slug2="option-two-now-you-have-another-stacked-inline-with-a-very-l",
)
RelatedPrepopulated.objects.get(
name="And now, with a tÃbűlaŘ inline !!!",
pubdate="1234-12-07",
status="option two",
slug1="and-now-with-a-tabular-inline-1234-12-07",
slug2="option-two-and-now-with-a-tabular-inline",
)
RelatedPrepopulated.objects.get(
name=r'tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate="1981-08-22",
status="option one",
slug1="tabular-inline-with-ignored-characters-1981-08-22",
slug2="option-one-tabular-inline-with-ignored-characters",
)
def test_populate_existing_object(self):
"""
The prepopulation works for existing objects too, as long as
the original field is empty (#19082).
"""
from selenium.webdriver.common.by import By
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=" this is the mAin nÀMë",
pubdate="2012-02-18",
status="option two",
slug1="",
slug2="",
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
object_url = self.live_server_url + reverse(
"admin:admin_views_mainprepopulated_change", args=(item.id,)
)
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" the best")
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
# Save the object
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.get(object_url)
self.selenium.find_element(By.ID, "id_name").send_keys(" hello")
        # The slugs didn't change since they were no longer empty.
slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value")
slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value")
self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18")
self.assertEqual(slug2, "option-two-this-is-the-main-name-the-best")
def test_collapsible_fieldset(self):
"""
        The 'collapse' class in a fieldsets definition allows
        showing/hiding the appropriate field section.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_add")
)
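        # The title field sits inside a collapsed fieldset: hidden at first,
        # revealed by the "Show" toggle, whose label then flips to "Hide".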
self.assertFalse(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
self.assertTrue(self.selenium.find_element(By.ID, "id_title").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "fieldsetcollapser0").text, "Hide"
)
def test_selectbox_height_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_pizza_add")
self.selenium.get(url)
self.selenium.find_elements(By.LINK_TEXT, "Show")[0].click()
from_filter_box = self.selenium.find_element(By.ID, "id_toppings_filter")
from_box = self.selenium.find_element(By.ID, "id_toppings_from")
to_filter_box = self.selenium.find_element(By.ID, "id_toppings_filter_selected")
to_box = self.selenium.find_element(By.ID, "id_toppings_to")
self.assertEqual(
(
to_filter_box.get_property("offsetHeight")
+ to_box.get_property("offsetHeight")
),
(
from_filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_selectbox_height_not_collapsible_fieldset(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin7:index"),
)
url = self.live_server_url + reverse("admin7:admin_views_question_add")
self.selenium.get(url)
from_filter_box = self.selenium.find_element(
By.ID, "id_related_questions_filter"
)
from_box = self.selenium.find_element(By.ID, "id_related_questions_from")
to_filter_box = self.selenium.find_element(
By.ID, "id_related_questions_filter_selected"
)
to_box = self.selenium.find_element(By.ID, "id_related_questions_to")
self.assertEqual(
(
to_filter_box.get_property("offsetHeight")
+ to_box.get_property("offsetHeight")
),
(
from_filter_box.get_property("offsetHeight")
+ from_box.get_property("offsetHeight")
),
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
from selenium.webdriver.common.by import By
# First form field has a single widget
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_picture_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_name"),
)
# First form field has a MultiWidget
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_reservation_add")
)
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element(By.ID, "id_start_date_0"),
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
from selenium.webdriver.common.by import By
pizza = Pizza.objects.create(name="Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse("admin:admin_views_pizza_change", args=(pizza.id,))
full_url = self.live_server_url + url
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(full_url)
self.selenium.find_element(By.CLASS_NAME, "deletelink").click()
# Click 'cancel' on the delete page.
self.selenium.find_element(By.CLASS_NAME, "cancel-link").click()
# Wait until we're back on the change page.
self.wait_for_text("#content h1", "Change pizza")
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)
def test_list_editable_popups(self):
"""
list_editable foreign keys have add/change popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
s1 = Section.objects.create(name="Test section")
Article.objects.create(
title="foo",
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=s1,
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url + reverse("admin:admin_views_article_changelist")
)
# Change popup
self.selenium.find_element(By.ID, "change_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Change section")
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.clear()
name_input.send_keys("<i>edited section</i>")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Hide sidebar.
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "<i>edited section</i>")
# Rendered select2 input.
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\n<i>edited section</i>")
# Add popup
self.selenium.find_element(By.ID, "add_id_form-0-section").click()
self.wait_for_and_switch_to_popup()
self.wait_for_text("#content h1", "Add section")
self.selenium.find_element(By.ID, "id_name").send_keys("new section")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_form-0-section"))
self.assertEqual(select.first_selected_option.text, "new section")
select2_display = self.selenium.find_element(
By.CLASS_NAME, "select2-selection__rendered"
)
# Clear button (×\n) is included in text.
self.assertEqual(select2_display.text, "×\nnew section")
def test_inline_uuid_pk_edit_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "change_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(select.first_selected_option.text, str(parent.id))
self.assertEqual(
select.first_selected_option.get_attribute("value"), str(parent.id)
)
def test_inline_uuid_pk_add_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_views_relatedwithuuidpkmodel_add")
)
self.selenium.find_element(By.ID, "add_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
uuid_id = str(ParentWithUUIDPK.objects.first().id)
self.assertEqual(select.first_selected_option.text, uuid_id)
self.assertEqual(select.first_selected_option.get_attribute("value"), uuid_id)
def test_inline_uuid_pk_delete_with_popup(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
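        # Deleting the related object via the popup should reset the select
        # to the empty "---------" choice.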
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//input[@value="Yes, I’m sure"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
select = Select(self.selenium.find_element(By.ID, "id_parent"))
self.assertEqual(ParentWithUUIDPK.objects.count(), 0)
self.assertEqual(select.first_selected_option.text, "---------")
self.assertEqual(select.first_selected_option.get_attribute("value"), "")
def test_inline_with_popup_cancel_delete(self):
"""Clicking ""No, take me back" on a delete popup closes the window."""
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
related_with_parent = RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_change",
args=(related_with_parent.id,),
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "delete_id_parent").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.XPATH, '//a[text()="No, take me back"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertEqual(len(self.selenium.window_handles), 1)
def test_list_editable_raw_id_fields(self):
from selenium.webdriver.common.by import By
parent = ParentWithUUIDPK.objects.create(title="test")
parent2 = ParentWithUUIDPK.objects.create(title="test2")
RelatedWithUUIDPKModel.objects.create(parent=parent)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
change_url = reverse(
"admin:admin_views_relatedwithuuidpkmodel_changelist",
current_app=site2.name,
)
self.selenium.get(self.live_server_url + change_url)
self.selenium.find_element(By.ID, "lookup_id_form-0-parent").click()
self.wait_for_and_switch_to_popup()
# Select "parent2" in the popup.
self.selenium.find_element(By.LINK_TEXT, str(parent2.pk)).click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# The newly selected pk should appear in the raw id input.
value = self.selenium.find_element(By.ID, "id_form-0-parent").get_attribute(
"value"
)
self.assertEqual(value, str(parent2.pk))
def test_input_element_font(self):
"""
Browsers' default stylesheets override the font of inputs. The admin
adds additional CSS to handle this.
"""
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("admin:login"))
element = self.selenium.find_element(By.ID, "id_username")
        # Some browsers quote the fonts, some don't.
fonts = [
font.strip().strip('"')
for font in element.value_of_css_property("font-family").split(",")
]
self.assertEqual(
fonts,
[
"-apple-system",
"BlinkMacSystemFont",
"Segoe UI",
"system-ui",
"Roboto",
"Helvetica Neue",
"Arial",
"sans-serif",
"Apple Color Emoji",
"Segoe UI Emoji",
"Segoe UI Symbol",
"Noto Color Emoji",
],
)
def test_search_input_filtered_page(self):
from selenium.webdriver.common.by import By
Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="Grace Hopper", gender=1, alive=False)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
person_url = reverse("admin:admin_views_person_changelist") + "?q=Gui"
self.selenium.get(self.live_server_url + person_url)
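        # The search bar keeps a usable width (more than 50 pixels) even on a
        # filtered changelist.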
self.assertGreater(
self.selenium.find_element(By.ID, "searchbar").rect["width"],
50,
)
def test_related_popup_index(self):
"""
Create a chain of 'self' related objects via popups.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
base_window = self.selenium.current_window_handle
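        # Open three nested add popups (test -> test2 -> test3), then save
        # from the innermost popup outwards; each parent window's next_box
        # select should point at the object created in its child popup.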
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
popup_window_test = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
popup_window_test2 = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.selenium.find_element(By.ID, "id_title").send_keys("test3")
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test2)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test3").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(popup_window_test)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test2").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.selenium.switch_to.window(base_window)
select = Select(self.selenium.find_element(By.ID, "id_next_box"))
next_box_id = str(Box.objects.get(title="test").id)
self.assertEqual(
select.first_selected_option.get_attribute("value"), next_box_id
)
def test_related_popup_incorrect_close(self):
"""
        Clean up child popups when closing a parent popup.
"""
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_box_add", current_app=site.name)
self.selenium.get(self.live_server_url + add_url)
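        # Open three nested popups; saving the middle one should also close
        # its child popup, leaving only the base window and the first popup.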
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup()
test_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=3)
test2_window = self.selenium.current_window_handle
self.selenium.find_element(By.ID, "id_title").send_keys("test2")
self.selenium.find_element(By.ID, "add_id_next_box").click()
self.wait_for_and_switch_to_popup(num_windows=4)
self.assertEqual(len(self.selenium.window_handles), 4)
self.selenium.switch_to.window(test2_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 2, 1)
self.assertEqual(len(self.selenium.window_handles), 2)
# Close final popup to clean up test.
self.selenium.switch_to.window(test_window)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_until(lambda d: len(d.window_handles) == 1, 1)
self.selenium.switch_to.window(self.selenium.window_handles[-1])
def test_hidden_fields_small_window(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
self.selenium.get(self.live_server_url + reverse("admin:admin_views_story_add"))
field_title = self.selenium.find_element(By.CLASS_NAME, "field-title")
current_size = self.selenium.get_window_size()
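        # The hidden title field must stay hidden in both the desktop and the
        # narrow, responsive layouts.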
try:
self.selenium.set_window_size(1024, 768)
self.assertIs(field_title.is_displayed(), False)
self.selenium.set_window_size(767, 575)
self.assertIs(field_title.is_displayed(), False)
finally:
self.selenium.set_window_size(current_size["width"], current_size["height"])
def test_updating_related_objects_updates_fk_selects_except_autocompletes(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
born_country_select_id = "id_born_country"
living_country_select_id = "id_living_country"
living_country_select2_textbox_id = "select2-id_living_country-container"
favorite_country_to_vacation_select_id = "id_favorite_country_to_vacation"
continent_select_id = "id_continent"
def _get_HTML_inside_element_by_id(id_):
return self.selenium.find_element(By.ID, id_).get_attribute("innerHTML")
def _get_text_inside_element_by_selector(selector):
return self.selenium.find_element(By.CSS_SELECTOR, selector).get_attribute(
"innerText"
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin:admin_views_traveler_add")
self.selenium.get(self.live_server_url + add_url)
# Add new Country from the born_country select.
self.selenium.find_element(By.ID, f"add_{born_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Argentina")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("South America")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
""",
)
# Argentina isn't added to the living_country select nor selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id}"), ""
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"",
)
# Argentina won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add new Country from the living_country select.
self.selenium.find_element(By.ID, f"add_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
self.selenium.find_element(By.ID, "id_name").send_keys("Spain")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Europe")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Spain</option>
""",
)
# Spain is added to the living_country select and it's also selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id} option"),
"Spain",
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"Spain",
)
# Spain won't appear because favorite_country_to_vacation field has
# limit_choices_to.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Edit second Country created from living_country select.
favorite_select = Select(
self.selenium.find_element(By.ID, living_country_select_id)
)
favorite_select.select_by_visible_text("Spain")
self.selenium.find_element(By.ID, f"change_{living_country_select_id}").click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.clear()
favorite_name_input.send_keys("Italy")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(born_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1" selected="">Argentina</option>
<option value="2">Italy</option>
""",
)
# Italy is added to the living_country select and it's also selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id} option"),
"Italy",
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"Italy",
)
# favorite_country_to_vacation field has no options.
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(favorite_country_to_vacation_select_id),
'<option value="" selected="">---------</option>',
)
# Add a new Asian country.
self.selenium.find_element(
By.ID, f"add_{favorite_country_to_vacation_select_id}"
).click()
self.wait_for_and_switch_to_popup()
favorite_name_input = self.selenium.find_element(By.ID, "id_name")
favorite_name_input.send_keys("Qatar")
continent_select = Select(
self.selenium.find_element(By.ID, continent_select_id)
)
continent_select.select_by_visible_text("Asia")
self.selenium.find_element(By.CSS_SELECTOR, '[type="submit"]').click()
self.selenium.switch_to.window(self.selenium.window_handles[0])
# Submit the new Traveler.
self.selenium.find_element(By.CSS_SELECTOR, '[name="_save"]').click()
traveler = Traveler.objects.get()
self.assertEqual(traveler.born_country.name, "Argentina")
self.assertEqual(traveler.living_country.name, "Italy")
self.assertEqual(traveler.favorite_country_to_vacation.name, "Qatar")
def test_redirect_on_add_view_add_another_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
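        # "Save and add another" should redirect back to an empty add form so
        # a second section can be created straight away.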
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 2")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and add another"]'
).click()
self.assertEqual(Section.objects.count(), 2)
def test_redirect_on_add_view_continue_button(self):
from selenium.webdriver.common.by import By
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
add_url = reverse("admin7:admin_views_section_add")
self.selenium.get(self.live_server_url + add_url)
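        # "Save and continue editing" should redirect to the change form for
        # the new object, with the submitted name preserved.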
name_input = self.selenium.find_element(By.ID, "id_name")
name_input.send_keys("Test section 1")
self.selenium.find_element(
By.XPATH, '//input[@value="Save and continue editing"]'
).click()
self.assertEqual(Section.objects.count(), 1)
name_input = self.selenium.find_element(By.ID, "id_name")
name_input_value = name_input.get_attribute("value")
self.assertEqual(name_input_value, "Test section 1")
@override_settings(ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(AdminFieldExtractionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_readonly_get(self):
response = self.client.get(reverse("admin:admin_views_post_add"))
self.assertNotContains(response, 'name="posted"')
        # 3 fields + 2 submit buttons + 5 inline management form fields + 2
        # hidden fields for inlines + 1 field for the inline + 2 empty forms
        # + 1 logout form.
self.assertContains(response, "<input", count=17)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response, "<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Multiline text in a readonly field gets <br> tags
self.assertContains(response, "Multiline<br>test<br>string")
self.assertContains(
response,
'<div class="readonly">Multiline<br>html<br>content</div>',
html=True,
)
self.assertContains(response, "InlineMultiline<br>test<br>string")
self.assertContains(
response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)),
)
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<div class="help"', 3)
self.assertContains(
response,
'<div class="help" id="id_title_helptext">Some help text for the title '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help" id="id_content_helptext">Some help text for the content '
"(with Unicode ŠĐĆŽćžšđ)</div>",
html=True,
)
self.assertContains(
response,
'<div class="help">Some help text for the date (with Unicode ŠĐĆŽćžšđ)'
"</div>",
html=True,
)
p = Post.objects.create(
title="I worked on readonly_fields", content="Its good stuff"
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_text_field(self):
p = Post.objects.create(
title="Readonly test",
content="test",
readonly_content="test\r\n\r\ntest\r\n\r\ntest\r\n\r\ntest",
)
Link.objects.create(
url="http://www.djangoproject.com",
post=p,
readonly_link_content="test\r\nlink",
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
# Checking readonly field.
self.assertContains(response, "test<br><br>test<br><br>test<br><br>test")
# Checking readonly field in inline.
self.assertContains(response, "test<br>link")
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by("-id")[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse("admin:admin_views_pizza_add"))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(
reverse("admin2:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get(
reverse("admin:admin_views_choice_change", args=(choice.pk,))
)
self.assertContains(
response, '<div class="readonly">No opinion</div>', html=True
)
def _test_readonly_foreignkey_links(self, admin_site):
"""
ForeignKey readonly fields render as links if the target model is
registered in admin.
"""
chapter = Chapter.objects.create(
title="Chapter 1",
content="content",
book=Book.objects.create(name="Book 1"),
)
language = Language.objects.create(iso="_40", name="Test")
obj = ReadOnlyRelatedField.objects.create(
chapter=chapter,
language=language,
user=self.superuser,
)
response = self.client.get(
reverse(
f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,)
),
)
# Related ForeignKey object registered in admin.
user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
f"{admin_site}:admin_views_language_change",
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(
response, '<div class="readonly">Chapter 1</div>', html=True
)
def test_readonly_foreignkey_links_default_admin_site(self):
self._test_readonly_foreignkey_links("admin")
def test_readonly_foreignkey_links_custom_admin_site(self):
self._test_readonly_foreignkey_links("namespaced_admin")
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(reverse("admin:admin_views_topping_add"))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(
reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
)
self.assertContains(response, "<label>Toppings:</label>", html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name="Adam")
pl = Plot.objects.create(name="Test Plot", team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details="Brand New Plot", plot=pl)
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
pd_url = reverse("admin:admin_views_plotdetails_change", args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
self.assertEqual(field.contents(), "-") # default empty value
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(
reverse("admin:admin_views_fieldoverridepost_change", args=(p.pk,))
)
self.assertContains(
response,
'<div class="help">Overridden help text for the date</div>',
html=True,
)
self.assertContains(
response,
'<label for="id_public">Overridden public label:</label>',
html=True,
)
self.assertNotContains(
response, "Some help text for the date (with Unicode ŠĐĆŽćžšđ)"
)
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name="<a>evil</a>")
response = self.client.get(
reverse("admin:admin_views_section_change", args=(section.pk,))
)
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name="Americano")
url = reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
with self.settings(LANGUAGE_CODE="fr"):
response = self.client.get(url)
self.assertContains(response, "<label>Toppings\u00A0:</label>", html=True)
@override_settings(ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get(reverse("admin:admin_views_stumpjoke_add"))
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)
@override_settings(ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
Inquisition.objects.create(expected=False, leader=actor, country="Spain")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
# lookup query string; in model we define inquisition field to have a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
# lookup query string; in model we define defendant0 field to have a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse("admin:admin_views_sketch_add"))
# Find the link
m = re.search(
rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content
)
self.assertTrue(m) # Got a match
popup_url = m[1].decode().replace("&", "&")
# Handle relative links
popup_url = urljoin(response.request["PATH_INFO"], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
# lookup query string; in model we define defendant1 field to have a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
def test_list_display_method_same_name_as_reverse_accessor(self):
"""
Should be able to use a ModelAdmin method in list_display that has the
same name as a reverse model field ("sketch" in this case).
"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
response = self.client.get(reverse("admin:admin_views_inquisition_changelist"))
self.assertContains(response, "list-display-sketch")
@override_settings(ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
},
)
new_user = User.objects.get(username="newuser")
self.assertRedirects(
response, reverse("admin:auth_user_change", args=(new_user.pk,))
)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_continue": "1",
},
)
new_user = User.objects.get(username="newuser")
new_user_url = reverse("admin:auth_user_change", args=(new_user.pk,))
self.assertRedirects(response, new_user_url, fetch_redirect_response=False)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
response = self.client.get(new_user_url)
self.assertContains(
response,
'<li class="success">The user “<a href="%s">'
"%s</a>” was added successfully. You may edit it again below.</li>"
% (new_user_url, new_user),
html=True,
)
def test_password_mismatch(self):
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "mismatch",
},
)
self.assertEqual(response.status_code, 200)
self.assertFormError(response.context["adminform"], "password1", [])
self.assertFormError(
response.context["adminform"],
"password2",
["The two password fields didn’t match."],
)
def test_user_fk_add_popup(self):
"""
User addition through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(response, reverse("admin:auth_user_add"))
self.assertContains(
response,
'class="related-widget-wrapper-link add-related" id="add_id_owner"',
)
response = self.client.get(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR
)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(
reverse("admin:auth_user_add") + "?%s=1" % IS_POPUP_VAR, data, follow=True
)
self.assertContains(response, ""obj": "newuser"")
def test_user_fk_change_popup(self):
"""
User change through a FK popup should return the appropriate JavaScript
response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_change", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_change", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
IS_POPUP_VAR: "1",
"_save": "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""obj": "newuser"")
self.assertContains(response, ""action": "change"")
def test_user_fk_delete_popup(self):
"""
User deletion through a FK popup should return the appropriate
JavaScript response.
"""
response = self.client.get(reverse("admin:admin_views_album_add"))
self.assertContains(
response, reverse("admin:auth_user_delete", args=("__fk__",))
)
self.assertContains(
response,
'class="related-widget-wrapper-link change-related" id="change_id_owner"',
)
user = User.objects.get(username="changeuser")
url = (
reverse("admin:auth_user_delete", args=(user.pk,)) + "?%s=1" % IS_POPUP_VAR
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = {
"post": "yes",
IS_POPUP_VAR: "1",
}
response = self.client.post(url, data, follow=True)
self.assertContains(response, ""action": "delete"")
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post(
reverse("admin:auth_user_add"),
{
"username": "newuser",
"password1": "newpassword",
"password2": "newpassword",
"_addanother": "1",
},
)
new_user = User.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_user_add"))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
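        # Databases that use savepoints wrap the view in two extra queries
        # (SAVEPOINT / RELEASE SAVEPOINT).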
expected_num_queries = 10 if connection.features.uses_savepoints else 8
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_user_change", args=(u.pk,)))
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get(
reverse("admin3:auth_user_password_change", args=(u.pk,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["form_url"], "pony")
@override_settings(ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(
reverse("admin:auth_group_add"),
{
"name": "newgroup",
},
)
Group.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_group_changelist"))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
expected_num_queries = 8 if connection.features.uses_savepoints else 6
with self.assertNumQueries(expected_num_queries):
response = self.client.get(reverse("admin:auth_group_change", args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_field_prefix_css_classes(self):
"""
Fields have a CSS class name with a 'field-' prefix.
"""
response = self.client.get(reverse("admin:admin_views_post_add"))
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
CSS class names are used for each app and model on the admin index
pages (#17050).
"""
# General index page
response = self.client.get(reverse("admin:index"))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_form
        template.
"""
response = self.client.get(reverse("admin:admin_views_section_add"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_list
        template.
"""
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the
        delete_confirmation template.
"""
response = self.client.get(
reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
        Ensure the app and model tags are correctly read by the app_index
        template.
"""
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the
        delete_selected_confirmation template.
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_section_changelist"), action_data
)
self.assertContains(response, '<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their
class attribute.
"""
Podcast.objects.create(name="Django Dose", release_date=datetime.date.today())
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertContains(response, '<th class="field-name">')
self.assertContains(response, '<td class="field-release_date nowrap">')
self.assertContains(response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
class AdminDocsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_tags(self):
response = self.client.get(reverse("django-admindocs-tags"))
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(
response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#built_in-autoescape">autoescape</a></li>',
html=True,
)
# An app tag exists in both the index and detail
self.assertContains(
response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>',
html=True,
)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(
response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True
)
self.assertContains(
response,
'<li><a href="#admin_list-admin_actions">admin_actions</a></li>',
html=True,
)
def test_filters(self):
response = self.client.get(reverse("django-admindocs-filters"))
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(
response, '<li><a href="#built_in-add">add</a></li>', html=True
)
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
class ValidXHTMLTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_lang_name_present(self):
with translation.override(None):
response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(ROOT_URLCONF="admin_views.urls", USE_THOUSAND_SEPARATOR=True)
class DateHierarchyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def assert_non_localized_year(self, response, year):
"""
The year is not localized with USE_THOUSAND_SEPARATOR (#15234).
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % date.year)
def assert_contains_month_link(self, response, date):
self.assertContains(
response,
'?release_date__month=%d&release_date__year=%d"'
% (date.month, date.year),
)
def assert_contains_day_link(self, response, date):
self.assertContains(
response,
"?release_date__day=%d&"
'release_date__month=%d&release_date__year=%d"'
% (date.day, date.month, date.year),
)
def test_empty(self):
"""
No date hierarchy links display with empty changelist.
"""
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
self.assertNotContains(response, "release_date__year=")
self.assertNotContains(response, "release_date__month=")
self.assertNotContains(response, "release_date__day=")
def test_single(self):
"""
Single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
day-level links appear for changelist within single month.
"""
DATES = (
datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
month-level links appear for changelist within single year.
"""
DATES = (
datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse("admin:admin_views_podcast_changelist")
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, "release_date__day=")
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
year-level links appear for year-spanning changelist.
"""
DATES = (
datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3),
)
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
# no day/month-level links
self.assertNotContains(response, "release_date__day=")
self.assertNotContains(response, "release_date__month=")
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = "%s?release_date__year=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = "%s?release_date__year=%d&release_date__month=%d" % (
reverse("admin:admin_views_podcast_changelist"),
date.year,
date.month,
)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
def test_related_field(self):
questions_data = (
# (posted data, number of answers),
(datetime.date(2001, 1, 30), 0),
(datetime.date(2003, 3, 15), 1),
(datetime.date(2005, 5, 3), 2),
)
for date, answer_count in questions_data:
question = Question.objects.create(posted=date)
for i in range(answer_count):
question.answer_set.create()
response = self.client.get(reverse("admin:admin_views_answer_changelist"))
for date, answer_count in questions_data:
link = '?question__posted__year=%d"' % date.year
if answer_count > 0:
self.assertContains(response, link)
else:
self.assertNotContains(response, link)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
One can easily customize the way related objects are saved.
Refs #16115.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
"child_set-TOTAL_FORMS": "3",
"child_set-INITIAL_FORMS": "0",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-1-name": "Catherine",
}
self.client.post(reverse("admin:admin_views_parent_add"), post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name="Josh Stone")
paul = Child.objects.create(parent=parent, name="Paul")
catherine = Child.objects.create(parent=parent, name="Catherine")
post = {
"child_set-TOTAL_FORMS": "5",
"child_set-INITIAL_FORMS": "2",
"name": "Josh Stone",
"child_set-0-name": "Paul",
"child_set-0-id": paul.id,
"child_set-1-name": "Catherine",
"child_set-1-id": catherine.id,
}
self.client.post(
reverse("admin:admin_views_parent_change", args=(parent.id,)), post
)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name="Josh Rock")
Child.objects.create(parent=parent, name="Paul")
Child.objects.create(parent=parent, name="Catherine")
post = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": parent.id,
"form-0-name": "Josh Stone",
"_save": "Save",
}
self.client.post(reverse("admin:admin_views_parent_changelist"), post)
children_names = list(
Child.objects.order_by("name").values_list("name", flat=True)
)
self.assertEqual("Josh Stone", Parent.objects.latest("id").name)
self.assertEqual(["Catherine Stone", "Paul Stone"], children_names)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def test_logout(self):
self.client.force_login(self.superuser)
response = self.client.post(reverse("admin:logout"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "registration/logged_out.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:logout"))
self.assertFalse(response.context["has_permission"])
self.assertNotContains(
response, "user-tools"
        )  # The user-tools div shouldn't be visible.
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.post(reverse("admin:logout"))
self.assertEqual(
response.status_code, 302
        )  # We should be redirected to the login page.
        # Follow the redirect and test the results.
response = self.client.post(reverse("admin:logout"), follow=True)
self.assertContains(
response,
'<input type="hidden" name="next" value="%s">' % reverse("admin:index"),
)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.request["PATH_INFO"], reverse("admin:login"))
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def send_message(self, level):
"""
        Helper that POSTs to the dummy message action and asserts that a
        message with the given level appears in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_%s" % level,
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="%s">Test %s</li>' % (level, level), html=True
)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message("debug")
def test_message_info(self):
self.send_message("info")
def test_message_success(self):
self.send_message("success")
def test_message_warning(self):
self.send_message("warning")
def test_message_error(self):
self.send_message("error")
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
"action": "message_extra_tags",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_usermessenger_changelist"),
action_data,
follow=True,
)
self.assertContains(
response, '<li class="extra_tag info">Test tags</li>', html=True
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
admin_site = site
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
def setUp(self):
self.client.force_login(self.superuser)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are equal despite the ordering
of their querystring. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
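        # _changelist_filters is itself an urlencoded querystring, so parse it
        # one level deeper to ignore ordering inside it as well.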
for parsed_qs in [parsed_qs1, parsed_qs2]:
if "_changelist_filters" in parsed_qs:
changelist_filters = parsed_qs["_changelist_filters"]
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs["_changelist_filters"] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
change_user_url = reverse(
"admin:auth_user_change", args=(self.joepublicuser.pk,)
)
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D1%26is_superuser__exact%3D1".format(change_user_url),
)
# Ignore scheme and host.
self.assertURLEqual(
"http://testserver{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
)
# Ignore ordering of querystring.
self.assertURLEqual(
"{}?is_staff__exact=0&is_superuser__exact=0".format(
reverse("admin:auth_user_changelist")
),
"{}?is_superuser__exact=0&is_staff__exact=0".format(
reverse("admin:auth_user_changelist")
),
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
"{}?_changelist_filters="
"is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url),
"{}?_changelist_filters="
"is_superuser__exact%3D0%26is_staff__exact%3D0".format(change_user_url),
)
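    # The helpers below build the changelist URL and the preserved-filters
    # querystring: the filters are urlencoded once, wrapped in
    # _changelist_filters, and urlencoded again.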
def get_changelist_filters(self):
return {
"is_superuser__exact": 0,
"is_staff__exact": 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
return urlencode(
{"_changelist_filters": self.get_changelist_filters_querystring()}
)
def get_sample_user_id(self):
return self.joepublicuser.pk
def get_changelist_url(self):
return "%s?%s" % (
reverse("admin:auth_user_changelist", current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self, add_preserved_filters=True):
url = reverse("admin:auth_user_add", current_app=self.admin_site.name)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_change_url(self, user_id=None, add_preserved_filters=True):
if user_id is None:
user_id = self.get_sample_user_id()
url = reverse(
"admin:auth_user_change", args=(user_id,), current_app=self.admin_site.name
)
if add_preserved_filters:
url = "%s?%s" % (url, self.get_preserved_filters_querystring())
return url
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_history",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse(
"admin:auth_user_delete",
args=(user_id,),
current_app=self.admin_site.name,
),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">{}</a>'.format(self.joepublicuser.username),
response.content.decode(),
)
self.assertURLEqual(detail_link[1], self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>', response.content.decode()
)
self.assertURLEqual(history_link[1], self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>', response.content.decode()
)
self.assertURLEqual(delete_link[1], self.get_delete_url())
# Test redirect on "Save".
post_data = {
"username": "joepublic",
"last_login_0": "2007-05-30",
"last_login_1": "13:20:10",
"date_joined_0": "2007-05-30",
"date_joined_1": "13:20:10",
}
post_data["_save"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_changelist_url())
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["_continue"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_change_url())
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["_addanother"] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_change_view_without_preserved_filters(self):
response = self.client.get(self.get_change_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form action="(.*?)" method="post" id="user_form" novalidate>',
response.content.decode(),
)
self.assertURLEqual(
form_action[1], "?%s" % self.get_preserved_filters_querystring()
)
post_data = {
"username": "dummy",
"password1": "test",
"password2": "test",
}
# Test redirect on "Save".
post_data["_save"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy").pk)
)
post_data.pop("_save")
# Test redirect on "Save and continue".
post_data["username"] = "dummy2"
post_data["_continue"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(
response, self.get_change_url(User.objects.get(username="dummy2").pk)
)
post_data.pop("_continue")
# Test redirect on "Save and add new".
post_data["username"] = "dummy3"
post_data["_addanother"] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertRedirects(response, self.get_add_url())
post_data.pop("_addanother")
def test_add_view_without_preserved_filters(self):
response = self.client.get(self.get_add_url(add_preserved_filters=False))
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {"post": "yes"})
self.assertRedirects(response, self.get_changelist_url())
def test_url_prefix(self):
context = {
"preserved_filters": self.get_preserved_filters_querystring(),
"opts": User._meta,
}
prefixes = ("", "/prefix/", "/後台/")
for prefix in prefixes:
with self.subTest(prefix=prefix), override_script_prefix(prefix):
url = reverse(
"admin:auth_user_changelist", current_app=self.admin_site.name
)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
"""#11277 -Labels of hidden fields in admin were not hidden."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_all_fields_visible(self):
response = self.client.get(reverse("admin:admin_views_emptymodelvisible_add"))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, "first")
self.assert_field_visible(response, "second")
def test_all_fields_hidden(self):
response = self.client.get(reverse("admin:admin_views_emptymodelhidden_add"))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, "first")
self.assert_field_hidden(response, "second")
def test_mixin(self):
response = self.client.get(reverse("admin:admin_views_emptymodelmixin_add"))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, "first")
self.assert_field_visible(response, "second")
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="fieldBox field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(
response, '<div class="fieldBox field-%s hidden">' % field_name
)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_add_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data.
        Also, assertFormError() and assertFormsetError() are usable for admin
forms and formsets.
"""
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2",
}
response = self.client.post(
reverse("admin:admin_views_parentwithdependentchildren_add"), post_data
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormError(response.context["adminform"], None, [])
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
self.assertFormsetError(
response.context["inline_admin_formset"], None, None, []
)
def test_change_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
pwdc = ParentWithDependentChildren.objects.create(
some_required_info=6, family_name="Test1"
)
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {
"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1",
}
response = self.client.post(
reverse(
"admin:admin_views_parentwithdependentchildren_change", args=(pwdc.id,)
),
post_data,
)
self.assertFormError(
response.context["adminform"],
"some_required_info",
["This field is required."],
)
self.assertFormsetError(
response.context["inline_admin_formset"],
0,
None,
[
"Children must share a family name with their parents in this "
"contrived test case"
],
)
def test_check(self):
"The view_on_site value is either a boolean or a callable"
try:
admin = CityAdmin(City, AdminSite())
CityAdmin.view_on_site = True
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = False
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(admin.check(), [])
CityAdmin.view_on_site = []
self.assertEqual(
admin.check(),
[
Error(
"The value of 'view_on_site' must be a callable or a boolean "
"value.",
obj=CityAdmin,
id="admin.E025",
),
],
)
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, 1))
)
def test_true(self):
"The default behavior is followed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_worker_change", args=(self.w1.pk,))
)
self.assertContains(
response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name)
)
def test_missing_get_absolute_url(self):
"None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
def test_custom_admin_site(self):
model_admin = ModelAdmin(City, customadmin.site)
content_type_pk = ContentType.objects.get_for_model(City).pk
redirect_url = model_admin.get_view_on_site_url(self.c1)
self.assertEqual(
redirect_url,
reverse(
f"{customadmin.site.name}:view_on_site",
kwargs={
"content_type_id": content_type_pk,
"object_id": self.c1.pk,
},
),
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
cls.s1 = State.objects.create(name="New York")
cls.s2 = State.objects.create(name="Illinois")
cls.s3 = State.objects.create(name="California")
cls.c1 = City.objects.create(state=cls.s1, name="New York")
cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
cls.w2 = Worker.objects.create(
work_at=cls.r1, name="Antonio", surname="Bianchi"
)
cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")
def setUp(self):
self.client.force_login(self.superuser)
def test_false(self):
"The 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(
reverse("admin:admin_views_state_change", args=(self.s1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
)
def test_true(self):
"The 'View on site' button is displayed if view_on_site is True"
response = self.client.get(
reverse("admin:admin_views_city_change", args=(self.c1.pk,))
)
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(
response, reverse("admin:view_on_site", args=(content_type_pk, self.r1.pk))
)
def test_callable(self):
"The right link is displayed if view_on_site is a callable"
response = self.client.get(
reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
)
self.assertContains(
response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
The GetFormsetsArgumentCheckingAdmin ModelAdmin throws an exception
if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_explicitly_provided_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_add"), post_data
)
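        # A 302 means the save succeeded; GetFormsetsArgumentCheckingAdmin
        # raises if it receives an unexpected obj argument.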
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_explicitlyprovidedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {"name": "1"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_add"), post_data
)
self.assertEqual(response.status_code, 302)
post_data = {"name": "2"}
response = self.client.post(
reverse("admin:admin_views_implicitlygeneratedpk_change", args=(1,)),
post_data,
)
self.assertEqual(response.status_code, 302)
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminSiteFinalCatchAllPatternTests(TestCase):
"""
Verifies the behaviour of the admin catch-all view.
    * Anonymous/non-staff users are redirected to login for all URLs, whether
otherwise valid or not.
* APPEND_SLASH is applied for staff if needed.
* Otherwise Http404.
* Catch-all view disabled via AdminSite.final_catch_all_view.
"""
def test_unknown_url_redirects_login_if_not_authenticated(self):
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), unknown_url)
)
def test_unknown_url_404_if_authenticated(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_known_url_missing_slash_redirects_login_if_not_authenticated(self):
known_url = reverse("admin:admin_views_article_changelist")[:-1]
response = self.client.get(known_url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), known_url)
)
def test_non_admin_url_shares_url_prefix(self):
url = reverse("non_admin")[:-1]
response = self.client.get(url)
# Redirects with the next URL also missing the slash.
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
def test_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
    def test_unknown_url_without_trailing_slash_if_not_authenticated(self):
url = reverse("admin:article_extra_json")[:-1]
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin:login"), url))
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1], SCRIPT_NAME="/prefix/")
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True, FORCE_SCRIPT_NAME="/prefix/")
def test_missing_slash_append_slash_true_force_script_name(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/prefix" + known_url,
status_code=301,
fetch_redirect_response=False,
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_non_staff_user(self):
user = User.objects.create_user(
username="user",
password="secret",
email="[email protected]",
is_staff=False,
)
self.client.force_login(user)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response,
"/test_admin/admin/login/?next=/test_admin/admin/admin_views/article",
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_single_model_no_append_slash(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin9:admin_views_actor_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Same tests above with final_catch_all_view=False.
def test_unknown_url_404_if_not_authenticated_without_final_catch_all_view(self):
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_unknown_url_404_if_authenticated_without_final_catch_all_view(self):
superuser = User.objects.create_superuser(
username="super",
password="secret",
email="[email protected]",
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url)
self.assertEqual(response.status_code, 404)
def test_known_url_redirects_login_if_not_auth_without_final_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin10:login"), known_url)
)
def test_known_url_missing_slash_redirects_with_slash_if_not_auth_no_catch_all_view(
self,
):
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, fetch_redirect_response=False
)
def test_non_admin_url_shares_url_prefix_without_final_catch_all_view(self):
url = reverse("non_admin10")
response = self.client.get(url[:-1])
self.assertRedirects(response, url, status_code=301)
def test_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")
response = self.client.get(url)
self.assertRedirects(response, "%s?next=%s" % (reverse("admin10:login"), url))
def test_unknown_url_no_trailing_slash_if_not_auth_without_final_catch_all_view(
self,
):
url = reverse("admin10:article_extra_json")[:-1]
response = self.client.get(url)
# Matches test_admin/admin10/admin_views/article/<path:object_id>/
self.assertRedirects(
response, url + "/", status_code=301, fetch_redirect_response=False
)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_unknown_url_without_final_catch_all_view(
self,
):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
unknown_url = "/test_admin/admin10/unknown/"
response = self.client.get(unknown_url[:-1])
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_missing_slash_append_slash_true_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertRedirects(
response, known_url, status_code=301, target_status_code=403
)
@override_settings(APPEND_SLASH=False)
def test_missing_slash_append_slash_false_without_final_catch_all_view(self):
superuser = User.objects.create_user(
username="staff",
password="secret",
email="[email protected]",
is_staff=True,
)
self.client.force_login(superuser)
known_url = reverse("admin10:admin_views_article_changelist")
response = self.client.get(known_url[:-1])
self.assertEqual(response.status_code, 404)
# Outside admin.
def test_non_admin_url_404_if_not_authenticated(self):
unknown_url = "/unknown/"
response = self.client.get(unknown_url)
# Does not redirect to the admin login.
self.assertEqual(response.status_code, 404)
299b62cf7e95fa04d1df7d052e9d7f6e90579da8d6054bd549181ef8af51fd34
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Aggregate,
Avg,
Case,
Count,
DecimalField,
F,
IntegerField,
Max,
Q,
StdDev,
Sum,
Value,
Variance,
When,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
Alfa,
Author,
Book,
Bravo,
Charlie,
Clues,
Entries,
HardbackBook,
ItemTag,
Publisher,
SelfRefFK,
Store,
WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.p2 = Publisher.objects.create(name="Sams", num_awards=1)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = HardbackBook.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
weight=4.5,
)
cls.b6 = HardbackBook.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
weight=3.7,
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.items():
self.assertEqual(getattr(obj, attr), value)
def test_annotation_with_value(self):
values = (
Book.objects.filter(
name="Practical Django Projects",
)
.annotate(
discount_price=F("price") * 2,
)
.values(
"discount_price",
)
.annotate(sum_discount=Sum("discount_price"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
values,
[
{
"discount_price": Decimal("59.38"),
"sum_discount": Decimal("59.38"),
}
],
)
if connection.features.allows_group_by_refs:
alias = connection.ops.quote_name("discount_price")
self.assertIn(f"GROUP BY {alias}", ctx[0]["sql"])
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
The subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(
select={
"name_of_shortest_book": shortest_book_sql,
}
).annotate(total_books=Count("book"))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)},
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={"price_per_page": "price / pages"}).aggregate(
Sum("pages")
),
{"pages__sum": 3703},
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Order of the annotate/extra in the query doesn't matter
obj = (
Book.objects.extra(select={"manufacture_cost": "price * .5"})
.annotate(mean_auth_age=Avg("authors__age"))
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Values queries can be combined with annotate and extra
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.values()
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = (
Book.objects.values()
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name", "mean_auth_age")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.values("name")
.filter(n_authors__gt=2)
)
self.assertSequenceEqual(
qs,
[{"name": "Python Web Development with Django"}],
)
# The annotations are added to values output if values() precedes
# annotate()
obj = (
Book.objects.values("name")
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
        # All of the objects are counted (rows whose aggregate is null
        # included) and values() respects the number of objects.
self.assertEqual(len(Author.objects.annotate(Avg("friends__age")).values()), 9)
# Consecutive calls to annotate accumulate in the query
qs = (
Book.objects.values("price")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "price")
.annotate(Max("publisher__num_awards"))
)
self.assertSequenceEqual(
qs,
[
{"price": Decimal("30"), "oldest": 35, "publisher__num_awards__max": 3},
{
"price": Decimal("29.69"),
"oldest": 37,
"publisher__num_awards__max": 7,
},
{
"price": Decimal("23.09"),
"oldest": 45,
"publisher__num_awards__max": 1,
},
{"price": Decimal("75"), "oldest": 57, "publisher__num_awards__max": 9},
{
"price": Decimal("82.8"),
"oldest": 57,
"publisher__num_awards__max": 7,
},
],
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("pages"), Max("price"), Sum("num_authors"), Avg("num_authors")
)
self.assertEqual(
vals,
{
"num_authors__sum": 10,
"num_authors__avg": Approximate(1.666, places=2),
"pages__max": 1132,
"price__max": Decimal("82.80"),
},
)
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")),
{"c__max": 3},
)
def test_conditional_aggregate(self):
# Conditional aggregation of a grouped queryset.
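        # b1 (two authors), b4 (three), and b5 (two) are the only books with
        # more than one author, so each contributes 1 to the Sum.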
self.assertEqual(
Book.objects.annotate(c=Count("authors"))
.values("pk")
.aggregate(test=Sum(Case(When(c__gt=1, then=1))))["test"],
3,
)
def test_sliced_conditional_aggregate(self):
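        # Of the first five authors ordered by pk, a1 (34), a2 (35), and
        # a4 (29) satisfy age <= 35.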
self.assertEqual(
Author.objects.order_by("pk")[:5].aggregate(
test=Sum(Case(When(age__lte=35, then=1)))
)["test"],
3,
)
def test_annotated_conditional_aggregate(self):
annotated_qs = Book.objects.annotate(
discount_price=F("price") * Decimal("0.75")
)
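        # Only b3 and b4 have fewer than 400 pages; both are priced 29.69, so
        # the average discounted price is 29.69 * 0.75 = 22.2675, about 22.27.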
self.assertAlmostEqual(
annotated_qs.aggregate(
test=Avg(
Case(
When(pages__lt=400, then="discount_price"),
output_field=DecimalField(),
)
)
)["test"],
Decimal("22.27"),
places=2,
)
def test_distinct_conditional_aggregate(self):
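        # b3 (300 pages) and b4 (350 pages) are the only books priced at
        # 29.69, so the average page count is 325.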
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(price=Decimal("29.69"), then="pages"),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_conditional_aggregate_on_complex_condition(self):
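        # The price range [29, 30) matches the same two books (b3 and b4) as
        # the test above.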
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(
Q(price__gte=Decimal("29")) & Q(price__lt=Decimal("30")),
then="pages",
),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_q_annotation_aggregate(self):
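        # has_pk is True for every saved book, so annotating with a Q object
        # must not change the row count.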
self.assertEqual(Book.objects.annotate(has_pk=Q(pk__isnull=False)).count(), 6)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__gt=Decimal(40)
)
),
1,
)
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__lte=Decimal(40)
)
),
4,
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, pages, price, "
"pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.aggregate(num_authors=Count("foo"))
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("foo"))
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, num_authors, "
"pages, price, pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("foo")
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(Book.objects.annotate(num_authors=Count("authors")).count(), 6)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count("authors")).aggregate(
Max("num_authors")
)
self.assertEqual(vals, {"num_authors__max": 3})
vals = Publisher.objects.annotate(avg_price=Avg("book__price")).aggregate(
Max("avg_price")
)
self.assertEqual(vals, {"avg_price__max": 75.0})
        # Aliases are quoted to protect aliases that might be reserved names.
vals = Book.objects.aggregate(number=Max("pages"), select=Max("pages"))
self.assertEqual(vals, {"number": 1132, "select": 1132})
# Regression for #10064: select_related() plays nice with aggregates
obj = (
Book.objects.select_related("publisher")
.annotate(num_authors=Count("authors"))
.values()
.get(isbn="013790395")
)
self.assertEqual(
obj,
{
"contact_id": self.a8.id,
"id": self.b5.id,
"isbn": "013790395",
"name": "Artificial Intelligence: A Modern Approach",
"num_authors": 2,
"pages": 1132,
"price": Decimal("82.8"),
"pubdate": datetime.date(1995, 1, 15),
"publisher_id": self.p3.id,
"rating": 4.0,
},
)
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(len(Book.objects.annotate(num_authors=Count("authors"))), 6)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).filter(
num_authors__gt=2
)
),
1,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).exclude(
num_authors__gt=2
)
),
5,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors__lt=3)
.exclude(num_authors__lt=2)
),
2,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.exclude(num_authors__lt=2)
.filter(num_authors__lt=3)
),
2,
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_awards__gt=2 * F("num_books"))
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
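    def test_aggregate_fexpr_having_sketch(self):
        # Illustrative sketch (not part of the original suite): a filter
        # that references an annotation is rendered as a HAVING condition,
        # which appears after GROUP BY in the generated SQL.
        qs = Publisher.objects.annotate(num_books=Count("book")).filter(
            num_books__gt=1
        )
        sql = str(qs.query)
        self.assertGreater(sql.index("HAVING"), sql.index("GROUP BY"))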
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values("EntryID__Entry").annotate(
Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True)
)
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count("clues__ID"))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
        # Aggregates used to mix up the ordering of columns for the
        # backend's convert_values method. Refs #21126.
e = Entries.objects.create(Entry="foo")
c = Clues.objects.create(EntryID=e, Clue="bar")
qs = Clues.objects.select_related("EntryID").annotate(Count("ID"))
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(Book.objects.filter(id__in=[]).count(), 0)
vals = Book.objects.filter(id__in=[]).aggregate(
num_authors=Count("authors"),
avg_authors=Avg("authors"),
max_authors=Max("authors"),
max_price=Max("price"),
max_rating=Max("rating"),
)
self.assertEqual(
vals,
{
"max_authors": None,
"max_rating": None,
"num_authors": 0,
"avg_authors": None,
"max_price": None,
},
)
qs = (
Publisher.objects.filter(name="Jonno's House of Books")
.annotate(
num_authors=Count("book__authors"),
avg_authors=Avg("book__authors"),
max_authors=Max("book__authors"),
max_price=Max("book__price"),
max_rating=Max("book__rating"),
)
.values()
)
self.assertSequenceEqual(
qs,
[
{
"max_authors": None,
"name": "Jonno's House of Books",
"num_awards": 0,
"max_price": None,
"num_authors": 0,
"max_rating": None,
"id": self.p5.id,
"avg_authors": None,
}
],
)
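    def test_empty_aggregate_default_sketch(self):
        # Illustrative sketch (not part of the original suite): wrapping the
        # aggregate in Coalesce replaces the None produced by an empty
        # result set with a concrete default value.
        from django.db.models.functions import Coalesce
        vals = Book.objects.filter(id__in=[]).aggregate(
            max_price=Coalesce(Max("price"), Value(Decimal("0"))),
        )
        self.assertEqual(vals, {"max_price": Decimal("0")})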
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count("authors")).order_by(
"publisher__name", "name"
),
[
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name,
)
# Regression for #10127 - Empty select_related() works with annotate
qs = (
Book.objects.filter(rating__lt=4.5)
.select_related()
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
(
"Artificial Intelligence: A Modern Approach",
51.5,
"Prentice Hall",
"Peter Norvig",
),
("Practical Django Projects", 29.0, "Apress", "James Bennett"),
(
"Python Web Development with Django",
Approximate(30.333, places=2),
"Prentice Hall",
"Jeffrey Forcier",
),
("Sams Teach Yourself Django in 24 Hours", 45.0, "Sams", "Brad Dayley"),
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name),
)
        # Regression for #10132 - If the values() clause only mentions extra
        # (select=) columns, those columns are used for grouping
qs = (
Book.objects.extra(select={"pub": "publisher_id"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
qs = (
Book.objects.extra(select={"pub": "publisher_id", "foo": "pages"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = (
Book.objects.filter(pages__gt=100)
.annotate(n_authors=Count("authors"))
.filter(n_authors__gt=2)
.order_by("n_authors")
)
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids),
[
"Python Web Development with Django",
],
lambda b: b.name,
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(
Book.objects.values("publisher")
.annotate(max_pages=Max("pages"))
.order_by()
.query
)
# There is just one GROUP BY clause (zero commas means at most one clause).
self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
msg = (
"The named annotation 'authors__age__avg' conflicts with "
"the default name for another annotation."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(
Avg("authors__age"), authors__age__avg=Avg("authors__age")
)
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a field name on the model raises ValueError
msg = "The annotation 'age' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(age=Avg("friends__age"))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with an m2m name on the model raises ValueError
msg = "The annotation 'friends' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(friends=Count("friends"))
def test_fk_attname_conflict(self):
msg = "The annotation 'contact_id' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(contact_id=F("publisher_id"))
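    def test_non_conflicting_alias_sketch(self):
        # Illustrative sketch (not part of the original suite): choosing an
        # alias that doesn't collide with any model field avoids the
        # ValueError raised in the conflict tests above.
        qs = Author.objects.annotate(mean_friend_age=Avg("friends__age"))
        self.assertEqual(qs.count(), 9)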
def test_values_queryset_non_conflict(self):
# If you're using a values query set, some potential conflicts are
# avoided.
        # age is a field on Author, so it shouldn't be allowed as an aggregate.
        # But since age isn't included in values(), the name is free and the
        # annotation is allowed.
results = (
Author.objects.values("name")
.annotate(age=Count("book_contact_set"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 1)
# Same problem, but aggregating over m2m fields
results = (
Author.objects.values("name")
.annotate(age=Avg("friends__age"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 32.0)
# Same problem, but colliding with an m2m field
results = (
Author.objects.values("name")
.annotate(friends=Count("friends"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["friends"], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a reverse-related name on the model raises ValueError
msg = "The annotation 'book_contact_set' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(book_contact_set=Avg("friends__age"))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count("authors"))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(),
[
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Practical Django Projects",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
# Regression for #10248 - Annotations work with dates()
qs = (
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors=2)
.dates("pubdate", "day")
)
self.assertSequenceEqual(
qs,
[
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"sheets": "(pages + %s) / %s"}, select_params=[1, 2])
.order_by("sheets")
.values("sheets")
)
self.assertQuerysetEqual(
qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values("publisher").annotate(Count("publisher")).count(), 4
)
self.assertEqual(
Book.objects.annotate(Count("publisher")).values("publisher").count(), 6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id])
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(sorted_publishers[0].n_books, 2)
self.assertEqual(sorted_publishers[1].n_books, 1)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books,
[
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("book_ptr__pages")),
{"n_pages": 2078},
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("pages")),
{"n_pages": 2078},
)
qs = (
HardbackBook.objects.annotate(
n_authors=Count("book_ptr__authors"),
)
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
qs = (
HardbackBook.objects.annotate(n_authors=Count("authors"))
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # field in an aggregate() call.
msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(mean_age=Avg("authors__age")).annotate(
Avg("mean_age")
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[])
.annotate(Count("friends"))
.aggregate(Count("pk")),
{"pk__count": 0},
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg("age")), {"age__avg": None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name"))
.exclude(friends__name="Joe")
.count(),
Author.objects.count(),
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.filter(pages__lt=F("n_authors") * 200)
.values_list("pk")
)
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs),
["Python Web Development with Django"],
attrgetter("name"),
)
def test_values_annotate_values(self):
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.values_list("pk", flat=True)
.order_by("name")
)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # When a field occurs on the LHS of a HAVING clause, it must also
        # appear in the GROUP BY clause.
qs = (
Book.objects.values_list("name")
.annotate(n_authors=Count("authors"))
.filter(pages__gt=F("n_authors"))
.values_list("name", flat=True)
.order_by("name")
)
        # Results should be the same; all Books have more pages than authors.
self.assertEqual(list(qs), list(Book.objects.values_list("name", flat=True)))
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = (
Book.objects.values_list("publisher__name")
.annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
)
.order_by("-publisher__name")
)
self.assertEqual(books[0], ("Sams", 1, Decimal("23.09"), 45.0, 528.0))
def test_annotation_disjunction(self):
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.filter(Q(n_authors=2) | Q(name="Python Web Development with Django"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right")
| (
Q(name="Artificial Intelligence: A Modern Approach")
& Q(n_authors=3)
)
)
).order_by("name")
self.assertQuerysetEqual(
qs,
[
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True))
.order_by("pk")
)
self.assertQuerysetEqual(
qs,
[
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None))
.order_by("num_awards")
)
self.assertQuerysetEqual(
qs,
[
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann",
],
attrgetter("name"),
)
def test_quoting_aggregate_order_by(self):
qs = (
Book.objects.filter(name="Python Web Development with Django")
.annotate(authorCount=Count("authors"))
.order_by("authorCount")
)
self.assertQuerysetEqual(
qs,
[
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount),
)
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev("pages")),
{"pages__stddev": Approximate(311.46, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating")),
{"rating__stddev": Approximate(0.60, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price")),
{"price__stddev": Approximate(Decimal("24.16"), 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("pages", sample=True)),
{"pages__stddev": Approximate(341.19, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating", sample=True)),
{"rating__stddev": Approximate(0.66, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price", sample=True)),
{"price__stddev": Approximate(Decimal("26.46"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages")),
{"pages__variance": Approximate(97010.80, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating")),
{"rating__variance": Approximate(0.36, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price")),
{"price__variance": Approximate(Decimal("583.77"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages", sample=True)),
{"pages__variance": Approximate(116412.96, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating", sample=True)),
{"rating__variance": Approximate(0.44, 2)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price", sample=True)),
{"price__variance": Approximate(Decimal("700.53"), 2)},
)
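    def test_sample_population_relation_sketch(self):
        # Illustrative sketch (not part of the original suite): for n rows,
        # the sample variance equals the population variance scaled by
        # n / (n - 1); with the six fixture books this turns 97010.80 into
        # roughly 116412.96, matching the assertions above.
        pages = list(Book.objects.values_list("pages", flat=True))
        n = len(pages)
        mean = sum(pages) / n
        pop_var = sum((p - mean) ** 2 for p in pages) / n
        sample_var = sum((p - mean) ** 2 for p in pages) / (n - 1)
        self.assertAlmostEqual(sample_var, pop_var * n / (n - 1), places=4)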
def test_filtering_by_annotation_name(self):
# Regression test for #14476
# The name of the explicitly provided annotation name in this case
# poses no problem
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.filter(book_cnt=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Neither in this case
qs = (
Author.objects.annotate(book_count=Count("book"))
.filter(book_count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = (
Author.objects.annotate(Count("book"))
.filter(book__count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count("book")).aggregate(Max("book__count")),
{"book__count__max": 2},
)
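    def test_default_alias_attribute_sketch(self):
        # Illustrative sketch (not part of the original suite): when no
        # keyword is passed, annotate() derives the alias from the lookup,
        # so Count("book") is exposed as the `book__count` attribute.
        author = Author.objects.annotate(Count("book")).get(name="Peter Norvig")
        self.assertEqual(author.book__count, 2)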
def test_annotate_joins(self):
"""
        The base table's join isn't promoted to LOUTER. This could cause
        the query generation to fail if there is an exclude() for an FK
        field in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count("pk"))
self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None)
# The query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count("book_contact_set"))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn("id", group_by[0][0])
self.assertNotIn("name", group_by[0][0])
self.assertNotIn("age", group_by[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only("id", "name").annotate(
num_contacts=Count("book_contact_set")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("age", grouping[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id
# because of the select_related.
self.assertEqual(
len(grouping), 1 if connection.features.allows_group_by_pk else 2
)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("contact", grouping[0][0])
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_columns(self):
"""
Unmanaged models are sometimes used to represent database views which
may not allow grouping by selected primary key.
"""
def assertQuerysetResults(queryset):
self.assertEqual(
[(b.name, b.num_authors) for b in queryset.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
queryset = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Unmanaged origin model.
with mock.patch.object(Book._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
for index, field in enumerate(Book._meta.fields):
self.assertIn(field.name, grouping[index][0])
self.assertIn(Author._meta.pk.name, grouping[-1][0])
assertQuerysetResults(queryset)
# Unmanaged related model.
with mock.patch.object(Author._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
self.assertIn(Book._meta.pk.name, grouping[0][0])
for index, field in enumerate(Author._meta.fields):
self.assertIn(field.name, grouping[index + 1][0])
assertQuerysetResults(queryset)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_as_tables(self):
qs = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Force treating unmanaged models as tables.
with mock.patch(
"django.db.connection.features.allows_group_by_selected_pks_on_model",
return_value=True,
):
with mock.patch.object(Book._meta, "managed", False), mock.patch.object(
Author._meta, "managed", False
):
_, _, grouping = qs.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 2)
self.assertIn("id", grouping[0][0])
self.assertIn("id", grouping[1][0])
self.assertQuerysetEqual(
qs.order_by("name"),
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
(
"The Definitive Guide to Django: Web Development Done "
"Right",
2,
),
],
attrgetter("name", "num_authors"),
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count("book_contact_set__contact"))
self.assertIn(" JOIN ", str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
        Regression test for #10870: aggregates with joins ignore extra
        filters provided by setup_joins; tests aggregations with generic
        reverse relations.
"""
django_book = Book.objects.get(name="Practical Django Projects")
ItemTag.objects.create(
object_id=django_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(django_book),
)
ItemTag.objects.create(
object_id=django_book.id,
tag="django",
content_type=ContentType.objects.get_for_model(django_book),
)
        # Assign a tag to a model with the same PK as the book above. If the
        # JOIN used in the aggregation doesn't include the content type in
        # its condition, the annotation will also count the 'hi mom' tag for
        # django_book.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(
object_id=wmpk.id,
tag="hi mom",
content_type=ContentType.objects.get_for_model(wmpk),
)
ai_book = Book.objects.get(
name__startswith="Paradigms of Artificial Intelligence"
)
ItemTag.objects.create(
object_id=ai_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(ai_book),
)
self.assertEqual(Book.objects.aggregate(Count("tags")), {"tags__count": 3})
results = Book.objects.annotate(Count("tags")).order_by("-tags__count", "name")
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
("Practical Django Projects", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Artificial Intelligence: A Modern Approach", 0),
("Python Web Development with Django", 0),
("Sams Teach Yourself Django in 24 Hours", 0),
("The Definitive Guide to Django: Web Development Done Right", 0),
],
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2), Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2) | Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
def test_name_filters(self):
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty"))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_name_expressions(self):
# Aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_filter_aggregates_or_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 | q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_and_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 & q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 ^ q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 ^ q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b2.pk, self.b3.pk, self.b5.pk],
attrgetter("pk"),
)
def test_ticket_11293_q_immutable(self):
"""
        Splitting a Q object into parts for WHERE/HAVING doesn't alter the
        original Q object.
"""
q1 = Q(isbn="")
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors"))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
An F() object referring to related column works correctly in group by.
"""
qs = Book.objects.annotate(account=Count("authors")).filter(
account=F("publisher__num_awards")
)
self.assertQuerysetEqual(
qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name
)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count("authors__id")).aggregate(
Sum("select"), Avg("select")
)
self.assertEqual(
vals,
{
"select__sum": 10,
"select__avg": Approximate(1.666, places=2),
},
)
def test_annotate_on_relation(self):
book = Book.objects.annotate(
avg_price=Avg("price"), publisher_name=F("publisher__name")
).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg("price")).aggregate(
publisher_awards=Sum("publisher__num_awards")
)
self.assertEqual(qs["publisher_awards"], 30)
def test_annotate_distinct_aggregate(self):
        # There are three books with a rating of 4.0, and two of them have
        # the same price. Hence, distinct() removes one rating of 4.0 from
        # the results.
vals1 = (
Book.objects.values("rating", "price")
.distinct()
.aggregate(result=Sum("rating"))
)
vals2 = Book.objects.aggregate(result=Sum("rating") - Value(4.0))
self.assertEqual(vals1, vals2)
def test_annotate_values_list_flat(self):
"""Find ages that are shared by at least two authors."""
qs = (
Author.objects.values_list("age", flat=True)
.annotate(age_count=Count("age"))
.filter(age_count__gt=1)
)
self.assertSequenceEqual(qs, [29])
def test_allow_distinct(self):
class MyAggregate(Aggregate):
pass
with self.assertRaisesMessage(TypeError, "MyAggregate does not allow distinct"):
MyAggregate("foo", distinct=True)
class DistinctAggregate(Aggregate):
allow_distinct = True
DistinctAggregate("foo", distinct=True)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_having_subquery_select(self):
authors = Author.objects.filter(pk=self.a1.pk)
books = Book.objects.annotate(Count("authors")).filter(
Q(authors__in=authors) | Q(authors__count__gt=2)
)
self.assertEqual(set(books), {self.b1, self.b4})
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related("alfa").annotate(Count("bravo__charlie"))
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(
Count("alfa__name")
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # Also, an existing join is demoted back to INNER when a filter on
        # the already promoted join is added.
qs = Charlie.objects.annotate(Count("alfa__name")).filter(
alfa__name__isnull=False
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # But, as the join is nullable, its first use by annotate() will be
        # LOUTER
qs = Charlie.objects.annotate(Count("alfa__name"))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count("contact__name"))
self.assertIn(" INNER JOIN ", str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name="t1")
SelfRefFK.objects.create(name="t2", parent=t1)
SelfRefFK.objects.create(name="t3", parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count("children")).order_by("name"),
[("t1", 2), ("t2", 0), ("t3", 0)],
lambda x: (x.name, x.num_children),
)
import decimal
import enum
import json
import unittest
import uuid
from django import forms
from django.contrib.admin.utils import display_for_field
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.db.models.expressions import Exists, OuterRef, RawSQL, Value
from django.db.models.functions import Cast, JSONObject, Upper
from django.test import (
TransactionTestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayEnumModel,
ArrayFieldSubclass,
CharArrayModel,
DateTimeArrayModel,
IntegerArrayModel,
NestedIntegerArrayModel,
NullableIntegerArrayModel,
OtherTypesArrayModel,
PostgreSQLModel,
Tag,
)
try:
from psycopg2.extras import NumericRange
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.expressions import ArraySubquery
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.fields.array import IndexTransform, SliceTransform
from django.contrib.postgres.forms import (
SimpleArrayField,
SplitArrayField,
SplitArrayWidget,
)
except ImportError:
pass
@isolate_apps("postgres_tests")
class BasicTests(PostgreSQLSimpleTestCase):
def test_get_field_display(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
["Media", [(["vinyl", "cd"], "Audio")]],
(("mp3", "mp4"), "Digital"),
],
)
tests = (
(["vinyl", "cd"], "Audio"),
(("mp3", "mp4"), "Digital"),
(("a", "b"), "('a', 'b')"),
(["c", "d"], "['c', 'd']"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
def test_get_field_display_nested_array(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
ArrayField(models.CharField(max_length=16)),
choices=[
[
"Media",
[([["vinyl", "cd"], ("x",)], "Audio")],
],
((["mp3"], ("mp4",)), "Digital"),
],
)
tests = (
([["vinyl", "cd"], ("x",)], "Audio"),
((["mp3"], ("mp4",)), "Digital"),
((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"),
([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=["hello", "goodbye"])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=["1"])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
json=[{"a": 1}, {"b": 2}],
int_ranges=[NumericRange(10, 20), NumericRange(30, 40)],
bigint_ranges=[
NumericRange(7000000000, 10000000000),
NumericRange(50000000000, 70000000000),
],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
self.assertEqual(instance.json, loaded.json)
self.assertEqual(instance.int_ranges, loaded.int_ranges)
self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
self.assertEqual(instance.json, [])
self.assertIsNone(instance.int_ranges)
self.assertIsNone(instance.bigint_ranges)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field("field")
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
def test_nested_nullable_base_field(self):
instance = NullableIntegerArrayModel.objects.create(
field_nested=[[None, None], [None, None]],
)
self.assertEqual(instance.field_nested, [[None, None], [None, None]])
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = NullableIntegerArrayModel.objects.bulk_create(
[
NullableIntegerArrayModel(order=1, field=[1]),
NullableIntegerArrayModel(order=2, field=[2]),
NullableIntegerArrayModel(order=3, field=[2, 3]),
NullableIntegerArrayModel(order=4, field=[20, 30, 40]),
NullableIntegerArrayModel(order=5, field=None),
]
)
def test_empty_list(self):
NullableIntegerArrayModel.objects.create(field=[])
obj = (
NullableIntegerArrayModel.objects.annotate(
empty_array=models.Value(
[], output_field=ArrayField(models.IntegerField())
),
)
.filter(field=models.F("empty_array"))
.get()
)
self.assertEqual(obj.field, [])
self.assertEqual(obj.empty_array, [])
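    def test_empty_vs_null_sketch(self):
        # Illustrative sketch (not part of the original suite): an empty
        # array is a real value (PostgreSQL '{}') and is distinct from NULL,
        # so it matches field__len=0 but not field__isnull=True.
        obj = NullableIntegerArrayModel.objects.create(field=[])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len=0), [obj]
        )
        self.assertNotIn(
            obj, NullableIntegerArrayModel.objects.filter(field__isnull=True)
        )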
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1]
)
def test_exact_with_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]),
self.objs[:1],
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=["text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=["text"]), [instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2],
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.values_list("field", flat=True)
),
self.objs[2:3],
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
        # psycopg2 mogrify method that generates the ARRAY() syntax
        # expects literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F("id")]]),
self.objs[:2],
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F("field")]),
self.objs[:4],
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2],
)
def test_contained_by_including_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contained_by=[models.F("order"), 2]
),
self.objs[:3],
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3],
)
def test_contains_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
inner_qs = IntegerArrayModel.objects.values_list("field", flat=True)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=inner_qs[:1]),
self.objs[2:3],
)
inner_qs = IntegerArrayModel.objects.filter(field__contains=OuterRef("field"))
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(Exists(inner_qs)),
self.objs[1:3],
)
def test_contains_including_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contains=[2, Value(6) / Value(2)],
),
self.objs[2:3],
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=["FoO"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains="foo"), [instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=["text"]), []
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=["text"]), []
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=["text"]), []
)
def test_overlap_charfield_including_expression(self):
obj_1 = CharArrayModel.objects.create(field=["TEXT", "lower text"])
obj_2 = CharArrayModel.objects.create(field=["lower text", "TEXT"])
CharArrayModel.objects.create(field=["lower text", "text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(
field__overlap=[
Upper(Value("text")),
"other",
]
),
[obj_1, obj_2],
)
def test_lookups_autofield_array(self):
qs = (
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(
arrayagg=ArrayAgg("id"),
)
.order_by("field__0")
)
tests = (
("contained_by", [self.objs[1].pk, self.objs[2].pk, 0], [2]),
("contains", [self.objs[2].pk], [2]),
("exact", [self.objs[3].pk], [20]),
("overlap", [self.objs[1].pk, self.objs[3].pk], [2, 20]),
)
for lookup, value, expected in tests:
with self.subTest(lookup=lookup):
self.assertSequenceEqual(
qs.filter(
**{"arrayagg__" + lookup: value},
).values_list("field__0", flat=True),
expected,
)
@skipUnlessDBFeature("allows_group_by_refs")
def test_group_by_order_by_aliases(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(arrayagg=ArrayAgg("id"))
.order_by("field__0"),
[
{"field__0": 1, "arrayagg": [1]},
{"field__0": 2, "arrayagg": [2, 3]},
{"field__0": 20, "arrayagg": [4]},
],
)
alias = connection.ops.quote_name("field__0")
sql = ctx[0]["sql"]
self.assertIn(f"GROUP BY {alias}", sql)
self.assertIn(f"ORDER BY {alias}", sql)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3]
)
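    def test_index_zero_based_sketch(self):
        # Illustrative sketch (not part of the original suite): the index
        # transform is 0-based on the Django side even though PostgreSQL
        # arrays are 1-based, so field__1 targets the second element.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__1=3), self.objs[2:3]
        )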
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance]
)
def test_index_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["1;2"])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0=Cast(
IndexTransform(1, models.IntegerField, expr),
output_field=models.IntegerField(),
),
),
self.objs[:1],
)
def test_index_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(second=models.F("field__1"))
self.assertCountEqual(
qs.values_list("second", flat=True),
[None, None, None, 3, 30],
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3],
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0), [obj]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3]
)
def test_order_by_slice(self):
more_objs = (
NullableIntegerArrayModel.objects.create(field=[1, 637]),
NullableIntegerArrayModel.objects.create(field=[2, 1]),
NullableIntegerArrayModel.objects.create(field=[3, -98123]),
NullableIntegerArrayModel.objects.create(field=[4, 2]),
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.order_by("field__1"),
[
more_objs[2],
more_objs[1],
more_objs[3],
self.objs[2],
self.objs[3],
more_objs[0],
self.objs[4],
self.objs[1],
self.objs[0],
],
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance]
)
def test_slice_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["9;2;3"])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0_2=SliceTransform(2, 3, expr)
),
self.objs[2:3],
)
def test_slice_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(
first_two=models.F("field__0_2"),
)
self.assertCountEqual(
qs.values_list("first_two", flat=True),
[None, [1], [2], [2, 3], [20, 30]],
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]],
)
def test_enum_lookup(self):
class TestEnum(enum.Enum):
VALUE_1 = "value_1"
instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1])
self.assertSequenceEqual(
ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]),
[instance],
)
def test_unsupported_lookup(self):
msg = (
"Unsupported lookup '0_bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = (
"Unsupported lookup '0bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
def test_grouping_by_annotations_with_array_field_param(self):
value = models.Value([1], output_field=ArrayField(models.IntegerField()))
self.assertEqual(
NullableIntegerArrayModel.objects.annotate(
array_length=models.Func(
value,
1,
function="ARRAY_LENGTH",
output_field=models.IntegerField(),
),
)
.values("array_length")
.annotate(
count=models.Count("pk"),
)
.get()["array_length"],
1,
)
def test_filter_by_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.filter(
field__len=models.OuterRef("field__len"),
).values("field")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.alias(
same_sized_fields=ArraySubquery(inner_qs),
).filter(same_sized_fields__len__gt=1),
self.objs[0:2],
)
def test_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
)
.get(order=1)
.sibling_ids,
[2, 3, 4, 5],
)
def test_group_by_with_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
sibling_count=models.Max("sibling_ids__len"),
).values_list("sibling_count", flat=True),
[len(self.objs) - 1] * len(self.objs),
)
def test_annotated_ordered_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.order_by("-order").values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
ids=ArraySubquery(inner_qs),
)
.first()
.ids,
[5, 4, 3, 2, 1],
)
def test_annotated_array_subquery_with_json_objects(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values(json=JSONObject(order="order", field="field"))
siblings_json = (
NullableIntegerArrayModel.objects.annotate(
siblings_json=ArraySubquery(inner_qs),
)
.values_list("siblings_json", flat=True)
.get(order=1)
)
self.assertSequenceEqual(
siblings_json,
[
{"field": [2], "order": 2},
{"field": [2, 3], "order": 3},
{"field": [20, 30, 40], "order": 4},
{"field": None, "order": 5},
],
)
class TestDateTimeExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
now = timezone.now()
cls.datetimes = [now]
cls.dates = [now.date()]
cls.times = [now.time()]
cls.objs = [
DateTimeArrayModel.objects.create(
datetimes=cls.datetimes, dates=cls.dates, times=cls.times
),
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates), self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times), self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.ips = ["192.168.0.1", "::1"]
cls.uuids = [uuid.uuid4()]
cls.decimals = [decimal.Decimal(1.25), 1.75]
cls.tags = [Tag(1), Tag(2), Tag(3)]
cls.objs = [
OtherTypesArrayModel.objects.create(
ips=cls.ips,
uuids=cls.uuids,
decimals=cls.decimals,
tags=cls.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs
)
@isolate_apps("postgres_tests")
class TestChecks(PostgreSQLSimpleTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.ManyToManyField("postgres_tests.IntegerArrayModel")
)
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, "postgres.E002")
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=[])
model = MyModel()
self.assertEqual(
model.check(),
[
checks.Warning(
msg=(
"ArrayField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint="Use a callable instead, e.g., use `list` instead of `[]`.",
obj=MyModel._meta.get_field("field"),
id="fields.E010",
)
],
)
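    def test_callable_default_not_shared_sketch(self):
        # Illustrative sketch (not part of the original suite): with a
        # callable default each instance gets its own list, so mutating one
        # instance's value doesn't leak into another.
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.IntegerField(), default=list)
        first, second = MyModel(), MyModel()
        first.field.append(1)
        self.assertEqual(first.field, [1])
        self.assertEqual(second.field, [])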
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=list)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=None)
model = MyModel()
self.assertEqual(model.check(), [])
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_choices_tuple_list(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
[
"Media",
[(["vinyl", "cd"], "Audio"), (("vhs", "dvd"), "Video")],
],
(["mp3", "mp4"], "Digital"),
],
)
self.assertEqual(MyModel._meta.get_field("field").check(), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ["postgres_tests"]
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.contrib.postgres.fields.ArrayField")
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass")
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
}
)
def test_adding_field_with_default(self):
# See #22962
table_name = "postgres_tests_integerarraydefaultmodel"
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
}
)
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = "postgres_tests_chartextarrayindexmodel"
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v["columns"]
for k, v in list(
connection.introspection.get_constraints(cursor, table_name).items()
)
if k.endswith("_like")
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [["char2"]])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table_name
).values()
if c["index"] and len(c["columns"]) == 1
]
self.assertIn("char", indexes)
self.assertIn("char2", indexes)
self.assertIn("text", indexes)
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, '
'"model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize("json", [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize("json", self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
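# Model-field validation: ArrayField delegates to its base field for each
# item and reports failures with the "item_invalid" code and a 1-indexed
# position in the error params.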
class TestValidation(PostgreSQLSimpleTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, "item_invalid")
self.assertEqual(
cm.exception.message % cm.exception.params,
"Item 2 in the array did not validate: This field cannot be null.",
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(
cm.exception.messages[0],
"List contains 4 items, it should contain no more than 3.",
)
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, "nested_array_mismatch")
self.assertEqual(
cm.exception.messages[0], "Nested arrays must have the same length."
)
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc"], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
def test_with_validators(self):
field = ArrayField(
models.IntegerField(validators=[validators.MinValueValidator(1)])
)
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value is greater than "
"or equal to 1.",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params, {"nth": 1, "value": 0, "limit_value": 1, "show_value": 0}
)
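# SimpleArrayField, the default form field for ArrayField, splits a
# delimited string (comma by default) and runs the base form field's
# clean() on every item, e.g. SimpleArrayField(forms.IntegerField())
# turns "1,2" into [1, 2].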
class TestSimpleFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean("a,b,c")
self.assertEqual(value, ["a", "b", "c"])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,9")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a whole number.",
)
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,")
self.assertEqual(
cm.exception.messages[0],
"Item 3 in the array did not validate: This field is required.",
)
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("abc,c,defg")
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(first_error.code, "item_invalid")
self.assertEqual(
first_error.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
second_error = errors[1]
self.assertEqual(
second_error.message,
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
)
self.assertEqual(second_error.code, "item_invalid")
self.assertEqual(
second_error.params,
{"nth": 3, "value": "defg", "limit_value": 2, "show_value": 4},
)
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField("[a-e]{2}"))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,bc,de")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a valid value.",
)
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter="|")
value = field.clean("a|b|c")
self.assertEqual(value, ["a", "b", "c"])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter="|")
value = field.clean("a,b|c,d")
self.assertEqual(value, [["a", "b"], ["c", "d"]])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(["a", "b", "c"])
self.assertEqual(value, "a,b,c")
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no more than 2.",
)
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no fewer than 4.",
)
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("")
self.assertEqual(cm.exception.messages[0], "This field is required.")
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_model_field_choices(self):
model_field = ArrayField(models.IntegerField(choices=((1, "A"), (2, "B"))))
form_field = model_field.formfield()
self.assertEqual(form_field.clean("1,2"), [1, 2])
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ["a", "b", "c"]
self.assertEqual(field.clean(vals), vals)
def test_has_changed(self):
field = SimpleArrayField(forms.IntegerField())
self.assertIs(field.has_changed([1, 2], [1, 2]), False)
self.assertIs(field.has_changed([1, 2], "1,2"), False)
self.assertIs(field.has_changed([1, 2], "1,2,3"), True)
self.assertIs(field.has_changed([1, 2], "a,b"), True)
def test_has_changed_empty(self):
field = SimpleArrayField(forms.CharField())
self.assertIs(field.has_changed(None, None), False)
self.assertIs(field.has_changed(None, ""), False)
self.assertIs(field.has_changed(None, []), False)
self.assertIs(field.has_changed([], None), False)
self.assertIs(field.has_changed([], ""), False)
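# SplitArrayField renders a fixed number of subfields, one per array item,
# instead of a single delimited text input.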
class TestSplitFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": "c"}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {"array_0": "", "array_1": "", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"array": ["This field is required."]})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False), size=5, remove_trailing_nulls=True
)
data = {
"array_0": "a",
"array_1": "",
"array_2": "b",
"array_3": "",
"array_4": "",
}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {"array_0": "", "array_1": ""}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"array": [
"Item 3 in the array did not validate: This field is required."
]
},
)
def test_invalid_integer(self):
msg = (
"Item 2 in the array did not validate: Ensure this value is less than or "
"equal to 100."
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(
str(SplitForm()),
"""
<div>
<label for="id_array_0">Array:</label>
<input id="id_array_0" name="array_0" type="text" required>
<input id="id_array_1" name="array_1" type="text" required>
<input id="id_array_2" name="array_2" type="text" required>
</div>
""",
)
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc", "c", "defg"])
self.assertEqual(
cm.exception.messages,
[
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
],
)
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
form = Form({"field_0": "1", "field_1": "2"})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
def test_splitarrayfield_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, True),
({"field": None}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "1", "field_1": "0"}, True),
({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False),
({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
def test_splitarrayfield_remove_trailing_nulls_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(
forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True
)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, False),
({"field": None}, {"field_0": "", "field_1": ""}, False),
({"field": []}, {"field_0": "", "field_1": ""}, False),
({"field": [1]}, {"field_0": "1", "field_1": ""}, False),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
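# SplitArrayWidget wraps one copy of the underlying widget per item and
# suffixes each subwidget's name with its index (name_0, name_1, ...).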
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context(
"name", ["val1", "val2"]
),
{
"widget": {
"name": "name",
"is_hidden": False,
"required": False,
"value": "['val1', 'val2']",
"attrs": {},
"template_name": "postgres/widgets/split_array.html",
"subwidgets": [
{
"name": "name_0",
"is_hidden": False,
"required": False,
"value": "val1",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
{
"name": "name_1",
"is_hidden": False,
"required": False,
"value": "val2",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
],
}
},
)
def test_checkbox_get_context_attrs(self):
context = SplitArrayWidget(
forms.CheckboxInput(),
size=2,
).get_context("name", [True, False])
self.assertEqual(context["widget"]["value"], "[True, False]")
self.assertEqual(
[subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]],
[{"checked": True}, {}],
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
None,
"""
<input name="array_0" type="text">
<input name="array_1" type="text">
""",
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
["val1", "val2"],
attrs={"id": "foo"},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1">
<input id="foo_1" name="array_1" type="text" value="val2">
"""
),
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data(
{"field_0": "value", "field_1": "value"}, {}, "field"
),
False,
)
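# Admin integration: display_for_field() joins array items with ", ", and
# for a field with choices it shows the matching choice label, falling
# back to empty_value when no choice matches.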
class TestAdminUtils(PostgreSQLTestCase):
empty_value = "-empty-"
def test_array_display_for_field(self):
array_field = ArrayField(models.IntegerField())
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "1, 2")
def test_array_with_choices_display_for_field(self):
array_field = ArrayField(
models.IntegerField(),
choices=[
([1, 2, 3], "1st choice"),
([1, 2], "2nd choice"),
],
)
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "2nd choice")
display_value = display_for_field(
[99, 99],
array_field,
self.empty_value,
)
self.assertEqual(display_value, self.empty_value)
import inspect
import threading
from datetime import datetime, timedelta
from unittest import mock
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections, models
from django.db.models.manager import BaseManager
from django.db.models.query import MAX_GET_RESULTS, EmptyQuerySet
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import (
Article,
ArticleSelectOnSave,
ChildPrimaryKeyWithDefault,
FeaturedArticle,
PrimaryKeyWithDefault,
SelfRef,
)
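# Core model-instance behavior: construction, save() semantics, lookups,
# and manager behavior for the Article family of test models.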
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline="Parrot programs in Python",
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, "Second article", datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, "Second article")
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline="Third article",
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, "Third article")
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(
headline="First", pub_date=datetime(2005, 7, 30, 0, 0)
)
a2 = Article.objects.create(
headline="First", pub_date=datetime(2005, 7, 30, 0, 0)
)
a3 = Article.objects.create(
headline="First", pub_date=datetime(2005, 7, 30, 0, 0)
)
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, "Fourth article", pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, "Fourth article")
def test_positional_and_keyword_args_for_the_same_field(self):
msg = "Article() got both positional and keyword arguments for field '%s'."
with self.assertRaisesMessage(TypeError, msg % "headline"):
Article(None, "Fifth article", headline="Other headline.")
with self.assertRaisesMessage(TypeError, msg % "headline"):
Article(None, "Sixth article", headline="")
with self.assertRaisesMessage(TypeError, msg % "pub_date"):
Article(None, "Seventh article", datetime(2021, 3, 1), pub_date=None)
def test_cannot_create_instance_with_invalid_kwargs(self):
msg = "Article() got unexpected keyword arguments: 'foo'"
with self.assertRaisesMessage(TypeError, msg):
Article(
id=None,
headline="Some headline",
pub_date=datetime(2005, 7, 31),
foo="bar",
)
msg = "Article() got unexpected keyword arguments: 'foo', 'bar'"
with self.assertRaisesMessage(TypeError, msg):
Article(
id=None,
headline="Some headline",
pub_date=datetime(2005, 7, 31),
foo="bar",
bar="baz",
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline="Article 5", pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, "Article 5")
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, "Default headline")
    def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
        """DateTimeFields save as much precision as was given, down to seconds."""
a1 = Article(
headline="Article 7",
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(
Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30)
)
a2 = Article(
headline="Article 8",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(
Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline="original", pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = "Updated headline"
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = ["Parrot programs in Python", "Second article", "Third article"]
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline="Some headline", pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
def test_save_primary_with_default(self):
        # The UPDATE attempt is skipped when the primary key has a default.
with self.assertNumQueries(1):
PrimaryKeyWithDefault().save()
def test_save_parent_primary_with_default(self):
        # The UPDATE attempt is skipped when an inherited primary key has a
        # default.
with self.assertNumQueries(2):
ChildPrimaryKeyWithDefault().save()
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(
AttributeError, "Manager isn't accessible via Article instances"
):
getattr(
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), "objects"))
self.assertTrue(hasattr(Article, "objects"))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = ["An article", "Article One", "Amazing article", "Boring article"]
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.order_by("headline"),
sorted(headlines),
transform=lambda a: a.headline,
)
Article.objects.filter(headline__startswith="A").delete()
self.assertEqual(Article.objects.get().headline, "Boring article")
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline="First", pub_date=some_pub_date)
a2 = Article.objects.create(headline="Second", pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(
Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id)
)
def test_microsecond_precision(self):
a9 = Article(
headline="Article 9",
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline="Article 101",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, "Article 101")
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
a11 = Article.objects.create(
headline="Article 11",
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline="Article 12",
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2008),
[a11, a12],
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline="\u6797\u539f \u3081\u3050\u307f",
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(
Article.objects.get(pk=a.id).headline, "\u6797\u539f \u3081\u3050\u307f"
)
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline="Article 11",
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline="Article 12",
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline="Article 11"), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.bulk_create(
[
Article(
headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45)
),
Article(headline="Article 11", pub_date=datetime(2008, 1, 1)),
Article(
headline="Article 12",
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
),
]
)
dicts = (
Article.objects.filter(pub_date__year=2008)
.extra(select={"dashed-value": "1"})
.values("headline", "dashed-value")
)
self.assertEqual(
[sorted(d.items()) for d in dicts],
[
[("dashed-value", 1), ("headline", "Article 11")],
[("dashed-value", 1), ("headline", "Article 12")],
],
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.bulk_create(
[
Article(
headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45)
),
Article(headline="Article 11", pub_date=datetime(2008, 1, 1)),
Article(
headline="Article 12",
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
),
]
)
articles = Article.objects.filter(pub_date__year=2008).extra(
select={"dashed-value": "1", "undashedvalue": "2"}
)
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = "test"
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
        # Assigning a lazy value and then calling save() also works.
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
        # QuerySet.update() also accepts lazy values.
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
        # bulk_create() also handles lazy values.
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
msg = "EmptyQuerySet can't be instantiated"
with self.assertRaisesMessage(TypeError, msg):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance("", EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline="foo", pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list("pk")
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline="foo", pub_date=datetime.now())
class CustomQuerySet(models.QuerySet):
def do_something(self):
return "did something"
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), "did something")
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline="foo", pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(
len(Article.objects.none().values_list("id").order_by("id")), 0
)
with self.assertNumQueries(0):
self.assertEqual(
len(
Article.objects.none().filter(
id__in=Article.objects.values_list("id", flat=True)
)
),
0,
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline="foo", pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(
len(Article.objects.none().distinct("headline", "pub_date")), 0
)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertEqual(a, mock.ANY)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
msg = "Model instances without primary key value are unhashable"
with self.assertRaisesMessage(TypeError, msg):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_missing_hash_not_inherited(self):
class NoHash(models.Model):
def __eq__(self, other):
                return super().__eq__(other)
with self.assertRaisesMessage(TypeError, "unhashable type: 'NoHash'"):
hash(NoHash(id=1))
def test_specified_parent_hash_inherited(self):
class ParentHash(models.Model):
def __eq__(self, other):
                return super().__eq__(other)
__hash__ = models.Model.__hash__
self.assertEqual(hash(ParentHash(id=1)), 1)
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline="foo", pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = "bar"
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, "foo")
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
def test_multiple_objects_max_num_fetched(self):
max_results = MAX_GET_RESULTS - 1
Article.objects.bulk_create(
Article(headline="Area %s" % i, pub_date=datetime(2005, 7, 28))
for i in range(max_results)
)
self.assertRaisesMessage(
MultipleObjectsReturned,
"get() returned more than one Article -- it returned %d!" % max_results,
Article.objects.get,
headline__startswith="Area",
)
Article.objects.create(
headline="Area %s" % max_results, pub_date=datetime(2005, 7, 28)
)
self.assertRaisesMessage(
MultipleObjectsReturned,
"get() returned more than one Article -- it returned more than %d!"
% max_results,
Article.objects.get,
headline__startswith="Area",
)
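# Lookup API basics: get() and filter() with field lookups (exact, year,
# week_day, pk, ...) against a single saved Article.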
class ModelLookupTest(TestCase):
@classmethod
def setUpTestData(cls):
# Create an Article.
cls.a = Article(
id=None,
headline="Swallow programs in Python",
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
cls.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = "Parrot programs in Python"
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertSequenceEqual(Article.objects.all(), [self.a])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith="Swallow"), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(
Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a
)
self.assertEqual(
Article.objects.get(
pub_date__year=2005, pub_date__month=7, pub_date__day=28
),
self.a,
)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(
Article.objects.get(headline="Swallow programs in Python"), self.a
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2005),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__week_day=5),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(
ObjectDoesNotExist, "Article matching query does not exist."
):
Article.objects.get(
id__exact=2000,
)
        # To avoid dict-ordering related errors, check only one lookup per
        # assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(
ObjectDoesNotExist, "Article matching query does not exist."
):
Article.objects.get(
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertSequenceEqual(Article.objects.filter(pk__in=[self.a.id]), [self.a])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline="Swallow bites Python",
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(
headline__startswith="Swallow",
)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(
pub_date__year=2005,
)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
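# Saving an instance whose row was deleted on another connection must fall
# back to an INSERT instead of silently updating zero rows.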
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ["basic"]
@skipUnlessDBFeature("test_db_allows_multiple_connections")
    def test_concurrent_delete_with_save(self):
        """
        Fetching, deleting, and then saving an object should result in an
        INSERT, since the deleted row leaves nothing for an UPDATE to match.
        """
a = Article.objects.create(headline="foo", pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, "foo")
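# BaseManager._get_queryset_methods() copies public QuerySet methods onto
# Manager; QUERYSET_PROXY_METHODS is the expected result of that copy.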
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
"none",
"count",
"dates",
"datetimes",
"distinct",
"extra",
"get",
"get_or_create",
"update_or_create",
"create",
"bulk_create",
"bulk_update",
"filter",
"aggregate",
"annotate",
"alias",
"complex_filter",
"exclude",
"in_bulk",
"iterator",
"earliest",
"latest",
"first",
"last",
"order_by",
"select_for_update",
"select_related",
"prefetch_related",
"values",
"values_list",
"update",
"reverse",
"defer",
"only",
"using",
"exists",
"contains",
"explain",
"_insert",
"_update",
"raw",
"union",
"intersection",
"difference",
"aaggregate",
"abulk_create",
"abulk_update",
"acontains",
"acount",
"acreate",
"aearliest",
"aexists",
"aexplain",
"afirst",
"aget",
"aget_or_create",
"ain_bulk",
"aiterator",
"alast",
"alatest",
"aupdate",
"aupdate_or_create",
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(models.QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
def test_manager_method_attributes(self):
self.assertEqual(Article.objects.get.__doc__, models.QuerySet.get.__doc__)
        self.assertEqual(
            Article.objects.count.__name__, models.QuerySet.count.__name__
        )
def test_manager_method_signature(self):
self.assertEqual(
str(inspect.signature(Article.objects.bulk_create)),
"(objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, "
"update_fields=None, unique_fields=None)",
)
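# With Meta.select_on_save = True, save() issues a SELECT to decide between
# UPDATE and INSERT instead of trusting the rowcount reported by UPDATE, so
# an ordinary save costs two queries instead of one.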
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaisesMessage(
DatabaseError, "Forced update did not affect any rows."
):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
        # Change the manager to not return "row matched" for update().
        # Article's _base_manager class is swapped dynamically. This is a bit
        # of a hack, but it seems hard to test this properly otherwise.
        # Article's manager is patched (not ArticleSelectOnSave's) because
        # proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(models.QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaisesMessage(
DatabaseError, "Forced update did not affect any rows."
):
asos.save(force_update=True)
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg) as cm:
asos.save(update_fields=["pub_date"])
self.assertIsInstance(cm.exception.__cause__, DatabaseError)
finally:
Article._base_manager._queryset_class = orig_class
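# refresh_from_db() reloads field values from the database, optionally
# limited via fields=[...], and drops stale cached relations.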
class ModelRefreshTests(TestCase):
def test_refresh(self):
a = Article.objects.create(pub_date=datetime.now())
Article.objects.create(pub_date=datetime.now())
Article.objects.filter(pk=a.pk).update(headline="new headline")
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, "new headline")
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline="new headline 2", pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=["headline"])
self.assertEqual(a.headline, "new headline 2")
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'"
with self.assertRaisesMessage(TypeError, msg):
s.refresh_from_db(unknown_kwarg=10)
def test_lookup_in_fields(self):
s = SelfRef.objects.create()
msg = (
'Found "__" in fields argument. Relations and transforms are not allowed '
"in fields."
)
with self.assertRaisesMessage(ValueError, msg):
s.refresh_from_db(fields=["foo__bar"])
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
            # changed). It needs to be reloaded on access, so one query is
            # executed.
self.assertFalse(hasattr(s3_copy.selfref, "touched"))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = datetime.now()
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline="Parrot programs in Python",
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
    def test_refresh_clears_reverse_related(self):
        """refresh_from_db() clears cached reverse relations."""
article = Article.objects.create(
headline="Parrot programs in Python",
pub_date=datetime(2005, 7, 28),
)
self.assertFalse(hasattr(article, "featured"))
FeaturedArticle.objects.create(article_id=article.pk)
article.refresh_from_db()
self.assertTrue(hasattr(article, "featured"))
def test_refresh_clears_one_to_one_field(self):
article = Article.objects.create(
headline="Parrot programs in Python",
pub_date=datetime(2005, 7, 28),
)
featured = FeaturedArticle.objects.create(article_id=article.pk)
self.assertEqual(featured.article.headline, "Parrot programs in Python")
article.headline = "Parrot programs in Python 2.0"
article.save()
featured.refresh_from_db()
self.assertEqual(featured.article.headline, "Parrot programs in Python 2.0")
def test_prefetched_cache_cleared(self):
a = Article.objects.create(pub_date=datetime(2005, 7, 28))
s = SelfRef.objects.create(article=a)
# refresh_from_db() without fields=[...]
a1_prefetched = Article.objects.prefetch_related("selfref_set").first()
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
s.article = None
s.save()
# Relation is cleared and prefetch cache is stale.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
a1_prefetched.refresh_from_db()
# Cache was cleared and new results are available.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [])
# refresh_from_db() with fields=[...]
a2_prefetched = Article.objects.prefetch_related("selfref_set").first()
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
s.article = a
s.save()
# Relation is added and prefetch cache is stale.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
a2_prefetched.refresh_from_db(fields=["selfref_set"])
# Cache was cleared and new results are available.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [s])
import operator
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import Exists, F, IntegerField, OuterRef, Subquery, Value
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Author, Celebrity, ExtraInfo, Number, ReservedName
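# QuerySet.union(), intersection(), and difference() map to the SQL
# UNION [ALL], INTERSECT, and EXCEPT compound statements.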
@skipUnlessDBFeature("supports_select_union")
class QuerySetSetOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerysetEqual(
queryset, expected_numbers, operator.attrgetter("num"), ordered
)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
@skipUnlessDBFeature("supports_select_difference")
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
def test_union_none(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
def test_union_order_with_null_first_last(self):
Number.objects.filter(other_num=5).update(other_num=None)
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_first=True),
).values_list("other_num", flat=True),
[None, 1, 2, 3, 4, 6, 7, 8, 9, 10],
)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_last=True),
).values_list("other_num", flat=True),
[1, 2, 3, 4, 6, 7, 8, 9, 10, None],
)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_empty_qs_union_with_ordered_qs(self):
qs1 = Number.objects.order_by("num")
qs2 = Number.objects.none().union(qs1).order_by("num")
self.assertEqual(list(qs1), list(qs2))
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])
def test_ordering_by_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
self.assertQuerysetEqual(
qs1.union(qs2).order_by("-alias"),
[3, 2, 1, 0],
operator.itemgetter("alias"),
)
def test_ordering_by_f_expression(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])
def test_ordering_by_f_expression_and_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(
            alias=F("other_num")
        )
self.assertQuerysetEqual(
qs1.union(qs2).order_by(F("alias").desc()),
[10, 9, 8, 7],
operator.itemgetter("alias"),
)
Number.objects.create(num=-1)
self.assertQuerysetEqual(
qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
[10, 9, 8, 7, None],
operator.itemgetter("alias"),
)
def test_union_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
# List of columns can be changed.
reserved_name = qs1.union(qs1).values_list("order").get()
self.assertEqual(reserved_name, (2,))
def test_union_with_two_annotated_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
count=Value(0, IntegerField()),
)
.values_list("num", "count")
)
qs2 = (
Number.objects.filter(num=2)
.values("pk")
.annotate(
count=F("num"),
)
.annotate(
num=Value(1, IntegerField()),
)
.values_list("num", "count")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_extra_and_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.extra(
select={"count": 0},
)
.values_list("num", "count")
)
qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name="rn1", order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])
def test_union_with_values_list_and_order(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=7),
ReservedName(name="rn2", order=5),
ReservedName(name="rn0", order=6),
ReservedName(name="rn9", order=-1),
]
)
qs1 = ReservedName.objects.filter(order__gte=6)
qs2 = ReservedName.objects.filter(order__lte=5)
union_qs = qs1.union(qs2)
for qs, expected_result in (
# Order by a single column.
(union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
(union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
(union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
(union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
# Order by multiple columns.
(
union_qs.order_by("-name", "pk").values_list("order", flat=True),
[-1, 5, 7, 6],
),
(
union_qs.values_list("order", flat=True).order_by("-name", "pk"),
[-1, 5, 7, 6],
),
):
with self.subTest(qs=qs):
self.assertEqual(list(qs), expected_result)
def test_union_with_values_list_and_order_on_annotation(self):
qs1 = Number.objects.annotate(
annotation=Value(-1),
multiplier=F("annotation"),
).filter(num__gte=6)
qs2 = Number.objects.annotate(
annotation=Value(2),
multiplier=F("annotation"),
).filter(num__lte=5)
self.assertSequenceEqual(
qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
)
self.assertQuerysetEqual(
qs1.union(qs2)
.order_by(
F("annotation") * F("multiplier"),
"num",
)
.values("num"),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
operator.itemgetter("num"),
)
def test_union_multiple_models_with_values_list_and_order(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.all()
qs2 = ReservedName.objects.all()
self.assertSequenceEqual(
qs1.union(qs2).order_by("name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
self.assertSequenceEqual(
qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_in_subquery(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=8),
ReservedName(name="rn2", order=1),
ReservedName(name="rn3", order=5),
]
)
qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
self.assertCountEqual(
ReservedName.objects.annotate(
number=Subquery(qs1.union(qs2).values("num")),
)
.filter(number__isnull=False)
.values_list("order", flat=True),
[8, 1],
)
def test_union_in_subquery_related_outerref(self):
e1 = ExtraInfo.objects.create(value=7, info="e3")
e2 = ExtraInfo.objects.create(value=5, info="e2")
e3 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.bulk_create(
[
Author(name="a1", num=1, extra=e1),
Author(name="a2", num=3, extra=e2),
Author(name="a3", num=2, extra=e3),
]
)
qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
qs = (
Author.objects.annotate(
info=Subquery(qs1.union(qs2).values("info")[:1]),
)
.filter(info__isnull=False)
.values_list("name", flat=True)
)
self.assertCountEqual(qs, ["a1", "a2"])
        # Re-evaluating the combined query doesn't mutate it.
self.assertCountEqual(qs, ["a1", "a2"])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_in_with_ordering(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")
qs2 = Number.objects.filter(num__lt=2).order_by("num")
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[2, 3, 4, 5, 6, 7],
ordered=False,
)
@skipUnlessDBFeature(
"supports_slicing_ordering_in_compound", "allow_sliced_subqueries_with_in"
)
def test_union_in_with_ordering_and_slice(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")[:1]
qs2 = Number.objects.filter(num__lt=2).order_by("-num")[:1]
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[0, 2, 3, 4, 5, 6, 7, 9],
ordered=False,
)
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature("supports_select_intersection")
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
def test_exists_union(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
with CaptureQueriesContext(connection) as context:
self.assertIs(qs1.union(qs2).exists(), True)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Number._meta.pk.column),
captured_sql,
)
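        # exists() adds LIMIT 1 to the combined query; backends that support
        # slicing inside compound statements also push the limit into each of
        # the two union branches, hence 3 occurrences instead of 1.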
self.assertEqual(
captured_sql.count(connection.ops.limit_offset_sql(None, 1)),
3 if connection.features.supports_slicing_ordering_in_compound else 1,
)
def test_exists_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertIs(qs.union(qs).exists(), False)
@skipUnlessDBFeature("supports_select_intersection")
def test_exists_intersection(self):
qs1 = Number.objects.filter(num__gt=5)
qs2 = Number.objects.filter(num__lt=5)
self.assertIs(qs1.intersection(qs1).exists(), True)
self.assertIs(qs1.intersection(qs2).exists(), False)
@skipUnlessDBFeature("supports_select_difference")
def test_exists_difference(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__gte=3)
self.assertIs(qs1.difference(qs2).exists(), False)
self.assertIs(qs2.difference(qs1).exists(), True)
def test_get_union(self):
qs = Number.objects.filter(num=2)
self.assertEqual(qs.union(qs).get().num, 2)
@skipUnlessDBFeature("supports_select_difference")
def test_get_difference(self):
qs1 = Number.objects.all()
qs2 = Number.objects.exclude(num=2)
self.assertEqual(qs1.difference(qs2).get().num, 2)
@skipUnlessDBFeature("supports_select_intersection")
def test_get_intersection(self):
qs1 = Number.objects.all()
qs2 = Number.objects.filter(num=2)
self.assertEqual(qs1.intersection(qs2).get().num, 2)
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by("num")[:2]
qs2 = Number.objects.order_by("-num")[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])
@skipIfDBFeature("supports_slicing_ordering_in_compound")
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
qs3 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id").union(qs3))
@skipIfDBFeature("supports_select_intersection")
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "intersection is not supported on this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name="99 little bugs", order=99)
qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
qs2 = ReservedName.objects.values_list("order")
self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = (
Number.objects.filter()
.annotate(
annotation=Value(1, IntegerField()),
)
.values("annotation", num2=F("num"))
)
qs2 = Number.objects.filter().values("id", "num")
# Should not raise
list(qs1.union(qs2).order_by("annotation"))
list(qs1.union(qs2).order_by("num2"))
msg = "ORDER BY term does not match any column in the result set"
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id"))
        # 'num' was re-aliased to 'num2'.
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("num"))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num")))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num").desc()))
        # With the operands switched, 'num' matches a selected column again.
list(qs2.union(qs1).order_by("num"))
@skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
def test_qs_with_subcompound_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
self.assertEqual(qs1.difference(qs2).count(), 2)
def test_order_by_same_type(self):
qs = Number.objects.all()
union = qs.union(qs)
numbers = list(range(10))
self.assertNumbersEqual(union.order_by("num"), numbers)
self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))
def test_unsupported_operations_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.%s() after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
for operation in (
"alias",
"annotate",
"defer",
"delete",
"distinct",
"exclude",
"extra",
"filter",
"only",
"prefetch_related",
"select_related",
"update",
):
with self.subTest(combinator=combinator, operation=operation):
with self.assertRaisesMessage(
NotSupportedError,
msg % (operation, combinator),
):
getattr(getattr(qs, combinator)(qs), operation)()
with self.assertRaisesMessage(
NotSupportedError,
msg % ("contains", combinator),
):
obj = Number.objects.first()
getattr(qs, combinator)(qs).contains(obj)
def test_get_with_filters_unsupported_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(NotSupportedError, msg % combinator):
getattr(qs, combinator)(qs).get(num=2)
def test_operator_on_combined_qs_error(self):
qs = Number.objects.all()
msg = "Cannot use %s operator with combined queryset."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
operators = [
("|", operator.or_),
("&", operator.and_),
("^", operator.xor),
]
for combinator in combinators:
combined_qs = getattr(qs, combinator)(qs)
for operator_, operator_func in operators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(qs, combined_qs)
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(combined_qs, qs)
|
3b82f26d0ed52c81ad2df6e7f4a28a2e50095e7ba351850c6ef549e707975295 |
from django.db import models
from django.test import TestCase
from .models import (
Book,
Car,
CustomManager,
CustomQuerySet,
DeconstructibleCustomManager,
FastCarAsBase,
FastCarAsDefault,
FunPerson,
OneToOneRestrictedModel,
Person,
PersonFromAbstract,
PersonManager,
PublishedBookManager,
RelatedModel,
RestrictedModel,
)
class CustomManagerTests(TestCase):
custom_manager_names = [
"custom_queryset_default_manager",
"custom_queryset_custom_manager",
]
@classmethod
def setUpTestData(cls):
cls.b1 = Book.published_objects.create(
title="How to program", author="Rodney Dangerfield", is_published=True
)
cls.b2 = Book.published_objects.create(
title="How to be smart", author="Albert Einstein", is_published=False
)
cls.p1 = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
cls.droopy = Person.objects.create(
first_name="Droopy", last_name="Dog", fun=False
)
def test_custom_manager_basic(self):
"""
Test a custom Manager method.
"""
self.assertQuerysetEqual(Person.objects.get_fun_people(), ["Bugs Bunny"], str)
def test_queryset_copied_to_default(self):
"""
The methods of a custom QuerySet are properly copied onto the
default Manager.
"""
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
# Public methods are copied
manager.public_method()
# Private methods are not copied
with self.assertRaises(AttributeError):
manager._private_method()
def test_manager_honors_queryset_only(self):
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
# Methods with queryset_only=False are copied even if they are private.
manager._optin_private_method()
# Methods with queryset_only=True aren't copied even if they are public.
msg = (
"%r object has no attribute 'optout_public_method'"
% manager.__class__.__name__
)
with self.assertRaisesMessage(AttributeError, msg):
manager.optout_public_method()
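    # The two tests above exercise Manager.from_queryset(). A minimal sketch
    # of the pattern, with hypothetical names (the real definitions live in
    # .models):
    #
    #   class PersonQuerySet(models.QuerySet):
    #       def public_method(self):        # copied onto the manager
    #           return self
    #
    #       def _private_method(self):      # not copied (leading underscore)
    #           return self
    #
    #       def _optin_private_method(self):
    #           return self
    #       _optin_private_method.queryset_only = False  # copied anyway
    #
    #       def optout_public_method(self):
    #           return self
    #       optout_public_method.queryset_only = True  # never copied
    #
    #   CustomManager = models.Manager.from_queryset(PersonQuerySet)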
def test_manager_use_queryset_methods(self):
"""
        A custom manager uses the methods of its custom QuerySet.
"""
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
queryset = manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomQuerySet, True)
# Specialized querysets inherit from our custom queryset.
queryset = manager.values_list("first_name", flat=True).filter()
self.assertEqual(list(queryset), ["Bugs"])
self.assertIs(queryset._filter_CustomQuerySet, True)
self.assertIsInstance(queryset.values(), CustomQuerySet)
self.assertIsInstance(queryset.values().values(), CustomQuerySet)
self.assertIsInstance(queryset.values_list().values(), CustomQuerySet)
def test_init_args(self):
"""
The custom manager __init__() argument has been set.
"""
self.assertEqual(Person.custom_queryset_custom_manager.init_arg, "hello")
def test_manager_attributes(self):
"""
Custom manager method is only available on the manager and not on
querysets.
"""
Person.custom_queryset_custom_manager.manager_only()
msg = "'CustomQuerySet' object has no attribute 'manager_only'"
with self.assertRaisesMessage(AttributeError, msg):
Person.custom_queryset_custom_manager.all().manager_only()
def test_queryset_and_manager(self):
"""
Queryset method doesn't override the custom manager method.
"""
queryset = Person.custom_queryset_custom_manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomManager, True)
def test_related_manager(self):
"""
The related managers extend the default manager.
"""
self.assertIsInstance(self.droopy.books, PublishedBookManager)
self.assertIsInstance(self.b2.authors, PersonManager)
def test_no_objects(self):
"""
The default manager, "objects", doesn't exist, because a custom one
was provided.
"""
msg = "type object 'Book' has no attribute 'objects'"
with self.assertRaisesMessage(AttributeError, msg):
Book.objects
def test_filtering(self):
"""
        Custom managers respond to the usual filtering methods.
"""
self.assertQuerysetEqual(
Book.published_objects.all(),
[
"How to program",
],
lambda b: b.title,
)
def test_fk_related_manager(self):
Person.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1
)
Person.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1
)
FunPerson.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1
)
FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1
)
self.assertQuerysetEqual(
self.b1.favorite_books.order_by("first_name").all(),
[
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_books.all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="fun_people").all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_gfk_related_manager(self):
Person.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1
)
Person.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1
)
FunPerson.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1
)
FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1
)
self.assertQuerysetEqual(
self.b1.favorite_things.all(),
[
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_things.all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="fun_people").all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False
)
self.b1.fun_authors.add(droopy)
self.assertQuerysetEqual(
self.b1.authors.order_by("first_name").all(),
[
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_authors.order_by("first_name").all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager="fun_people").all(),
[
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
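    # The manager="..." calls in these tests select which extra manager
    # backs a related-object accessor. A hedged sketch of the kind of
    # managers the models presumably declare:
    #
    #   class FunPeopleManager(models.Manager):
    #       def get_queryset(self):
    #           return super().get_queryset().filter(fun=True)
    #
    #   class Person(models.Model):
    #       ...
    #       objects = models.Manager()
    #       fun_people = FunPeopleManager()
    #       boring_people = BoringPeopleManager()
    #
    # e.g. book.authors(manager="fun_people") returns a related manager
    # built from Person.fun_people instead of the default manager.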
def test_removal_through_default_fk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1
)
droopy = FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1
)
self.b1.fun_people_favorite_books.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_books.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
bugs.favorite_book = self.b1
bugs.save()
self.b1.fun_people_favorite_books.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_fk_related_manager(self):
self.test_removal_through_default_fk_related_manager(bulk=False)
def test_removal_through_specified_fk_related_manager(self, bulk=True):
Person.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1
)
droopy = Person.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1
)
# The fun manager DOESN'T remove boring people.
self.b1.favorite_books(manager="fun_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# The boring manager DOES remove boring people.
self.b1.favorite_books(manager="boring_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_book = self.b1
droopy.save()
# The fun manager ONLY clears fun people.
self.b1.favorite_books(manager="fun_people").clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager="fun_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_fk_related_manager(self):
self.test_removal_through_specified_fk_related_manager(bulk=False)
def test_removal_through_default_gfk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1
)
droopy = FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1
)
self.b1.fun_people_favorite_things.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
[
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_things.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
        # Restore the generic relation so clear() below has a fun person to
        # remove (favorite_thing, not favorite_book, is the GFK under test).
        bugs.favorite_thing = self.b1
bugs.save()
self.b1.fun_people_favorite_things.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_gfk_related_manager(self):
self.test_removal_through_default_gfk_related_manager(bulk=False)
def test_removal_through_specified_gfk_related_manager(self, bulk=True):
Person.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1
)
droopy = Person.objects.create(
first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1
)
# The fun manager DOESN'T remove boring people.
self.b1.favorite_things(manager="fun_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# The boring manager DOES remove boring people.
self.b1.favorite_things(manager="boring_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_thing = self.b1
droopy.save()
# The fun manager ONLY clears fun people.
self.b1.favorite_things(manager="fun_people").clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager="fun_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_gfk_related_manager(self):
self.test_removal_through_specified_gfk_related_manager(bulk=False)
def test_removal_through_default_m2m_related_manager(self):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(
first_name="Droopy", last_name="Dog", fun=False
)
self.b1.fun_authors.add(droopy)
self.b1.fun_authors.remove(droopy)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Bugs",
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.remove(bugs)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.add(bugs)
self.b1.fun_authors.clear()
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
def test_removal_through_specified_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
# The fun manager DOESN'T remove boring people.
self.b1.authors(manager="fun_people").remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# The boring manager DOES remove boring people.
self.b1.authors(manager="boring_people").remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager="boring_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
self.b1.authors.add(droopy)
# The fun manager ONLY clears fun people.
self.b1.authors(manager="fun_people").clear()
self.assertQuerysetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager="fun_people").all(),
[],
lambda c: c.first_name,
ordered=False,
)
def test_deconstruct_default(self):
mgr = models.Manager()
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(mgr_path, "django.db.models.manager.Manager")
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_deconstruct_as_manager(self):
mgr = CustomQuerySet.as_manager()
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertTrue(as_manager)
self.assertEqual(qs_path, "custom_managers.models.CustomQuerySet")
def test_deconstruct_from_queryset(self):
mgr = DeconstructibleCustomManager("a", "b")
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(
mgr_path, "custom_managers.models.DeconstructibleCustomManager"
)
self.assertEqual(
args,
(
"a",
"b",
),
)
self.assertEqual(kwargs, {})
mgr = DeconstructibleCustomManager("x", "y", c=3, d=4)
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(
mgr_path, "custom_managers.models.DeconstructibleCustomManager"
)
self.assertEqual(
args,
(
"x",
"y",
),
)
self.assertEqual(kwargs, {"c": 3, "d": 4})
def test_deconstruct_from_queryset_failing(self):
mgr = CustomManager("arg")
msg = (
"Could not find manager BaseCustomManagerFromCustomQuerySet in "
"django.db.models.manager.\n"
"Please note that you need to inherit from managers you "
"dynamically generated with 'from_queryset()'."
)
with self.assertRaisesMessage(ValueError, msg):
mgr.deconstruct()
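    # Why the deconstruction above fails: a class generated by
    # from_queryset() has no import path of its own. A hedged sketch of the
    # distinction (approximating what .models defines):
    #
    #   # Not deconstructible -- the generated class only exists at runtime.
    #   CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
    #
    #   # Deconstructible -- a named, importable subclass of the generated
    #   # class.
    #   class DeconstructibleCustomManager(
    #       BaseCustomManager.from_queryset(CustomQuerySet)
    #   ):
    #       pass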
def test_abstract_model_with_custom_manager_name(self):
"""
A custom manager may be defined on an abstract model.
It will be inherited by the abstract model's children.
"""
PersonFromAbstract.abstract_persons.create(objects="Test")
self.assertQuerysetEqual(
PersonFromAbstract.abstract_persons.all(),
["Test"],
lambda c: c.objects,
)
class TestCars(TestCase):
def test_managers(self):
# Each model class gets a "_default_manager" attribute, which is a
# reference to the first manager defined in the class.
Car.cars.create(name="Corvette", mileage=21, top_speed=180)
Car.cars.create(name="Neon", mileage=31, top_speed=100)
self.assertQuerysetEqual(
Car._default_manager.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerysetEqual(
Car.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
# alternate manager
self.assertQuerysetEqual(
Car.fast_cars.all(),
[
"Corvette",
],
lambda c: c.name,
)
# explicit default manager
self.assertQuerysetEqual(
FastCarAsDefault.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerysetEqual(
FastCarAsDefault._default_manager.all(),
[
"Corvette",
],
lambda c: c.name,
)
# explicit base manager
self.assertQuerysetEqual(
FastCarAsBase.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerysetEqual(
FastCarAsBase._base_manager.all(),
[
"Corvette",
],
lambda c: c.name,
)
class CustomManagersRegressTestCase(TestCase):
def test_filtered_default_manager(self):
"""Even though the default manager filters out some records,
we must still be able to save (particularly, save by updating
existing records) those filtered instances. This is a
regression test for #8990, #9527"""
related = RelatedModel.objects.create(name="xyzzy")
obj = RestrictedModel.objects.create(name="hidden", related=related)
obj.name = "still hidden"
obj.save()
# If the hidden object wasn't seen during the save process,
# there would now be two objects in the database.
self.assertEqual(RestrictedModel.plain_manager.count(), 1)
def test_refresh_from_db_when_default_manager_filters(self):
"""
Model.refresh_from_db() works for instances hidden by the default
manager.
"""
book = Book._base_manager.create(is_published=False)
Book._base_manager.filter(pk=book.pk).update(title="Hi")
book.refresh_from_db()
self.assertEqual(book.title, "Hi")
def test_save_clears_annotations_from_base_manager(self):
"""Model.save() clears annotations from the base manager."""
self.assertEqual(Book._meta.base_manager.name, "annotated_objects")
book = Book.annotated_objects.create(title="Hunting")
Person.objects.create(
first_name="Bugs",
last_name="Bunny",
fun=True,
favorite_book=book,
favorite_thing_id=1,
)
book = Book.annotated_objects.first()
self.assertEqual(book.favorite_avg, 1) # Annotation from the manager.
book.title = "New Hunting"
# save() fails if annotations that involve related fields aren't
# cleared before the update query.
book.save()
self.assertEqual(Book.annotated_objects.first().title, "New Hunting")
def test_delete_related_on_filtered_manager(self):
"""Deleting related objects should also not be distracted by a
restricted manager on the related object. This is a regression
test for #2698."""
related = RelatedModel.objects.create(name="xyzzy")
for name, public in (("one", True), ("two", False), ("three", False)):
RestrictedModel.objects.create(name=name, is_public=public, related=related)
obj = RelatedModel.objects.get(name="xyzzy")
obj.delete()
# All of the RestrictedModel instances should have been
# deleted, since they *all* pointed to the RelatedModel. If
# the default manager is used, only the public one will be
# deleted.
self.assertEqual(len(RestrictedModel.plain_manager.all()), 0)
def test_delete_one_to_one_manager(self):
# The same test case as the last one, but for one-to-one
# models, which are implemented slightly different internally,
# so it's a different code path.
obj = RelatedModel.objects.create(name="xyzzy")
OneToOneRestrictedModel.objects.create(name="foo", is_public=False, related=obj)
obj = RelatedModel.objects.get(name="xyzzy")
obj.delete()
self.assertEqual(len(OneToOneRestrictedModel.plain_manager.all()), 0)
def test_queryset_with_custom_init(self):
"""
BaseManager.get_queryset() should use kwargs rather than args to allow
custom kwargs (#24911).
"""
qs_custom = Person.custom_init_queryset_manager.all()
qs_default = Person.objects.all()
self.assertQuerysetEqual(qs_custom, qs_default)
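    # A hedged sketch of the queryset that #24911 covers (mirroring what
    # .models presumably defines): BaseManager.get_queryset() must pass
    # model, query, using, and hints as keyword arguments so a custom
    # __init__() can add parameters of its own in front.
    #
    #   class CustomInitQuerySet(models.QuerySet):
    #       def __init__(
    #           self, custom_optional_arg=None, model=None, query=None,
    #           using=None, hints=None,
    #       ):
    #           super().__init__(model=model, query=query, using=using,
    #                            hints=hints)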
|
668a20982df879ebbecd77e3db26aeb3d932cf76f00550c685d0f213dfc30c2e |
"""
Regression tests for Model inheritance behavior.
"""
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor,
BachelorParty,
BirthdayParty,
BusStation,
Child,
Congressman,
DerivedM,
InternalCertificationAudit,
ItalianRestaurant,
M2MChild,
MessyBachelorParty,
ParkingLot,
ParkingLot3,
ParkingLot4A,
ParkingLot4B,
Person,
Place,
Politician,
Profile,
QualityControl,
Restaurant,
SelfRefChild,
SelfRefParent,
Senator,
Supplier,
TrainStation,
User,
Wholesaler,
)
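# The tests below assume the multi-table inheritance chain defined in
# .models. A hedged sketch of its shape, abridged to the fields the tests
# actually touch:
#
#   class Place(models.Model):
#       name = models.CharField(max_length=...)
#       address = models.CharField(max_length=...)
#
#   class Restaurant(Place):
#       serves_hot_dogs = models.BooleanField(default=False)
#       serves_pizza = models.BooleanField(default=False)
#
#   class ItalianRestaurant(Restaurant):
#       serves_gnocchi = models.BooleanField(default=False)
#
# The ParkingLot variants replace the implicit *_ptr link with an explicit
# OneToOneField(..., parent_link=True) named "parent".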
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# When you create a Parent object with a specific reference to an
# existent child instance, saving the Parent doesn't duplicate the
# child. This behavior is only activated during a raw save - it is
# mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(name="Guido's House of Pasta", address="944 W. Fullerton")
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False,
)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant, serves_gnocchi=True
)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name="Main St", address="111 Main St")
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# No extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values("name", "serves_hot_dogs"))
self.assertEqual(
dicts, [{"name": "Guido's House of Pasta", "serves_hot_dogs": True}]
)
dicts = list(
ItalianRestaurant.objects.values(
"name", "serves_hot_dogs", "serves_gnocchi"
)
)
self.assertEqual(
dicts,
[
{
"name": "Guido's House of Pasta",
"serves_gnocchi": True,
"serves_hot_dogs": True,
}
],
)
dicts = list(ParkingLot.objects.values("name", "capacity"))
self.assertEqual(
dicts,
[
{
"capacity": 100,
"name": "Main St",
}
],
)
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = "Derelict lot"
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, "Derelict lot")
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values("name", "serves_hot_dogs"))
self.assertEqual(
dicts,
[
{
"name": "Guido's All New House of Pasta",
"serves_hot_dogs": False,
}
],
)
dicts = list(
ItalianRestaurant.objects.values(
"name", "serves_hot_dogs", "serves_gnocchi"
)
)
self.assertEqual(
dicts,
[
{
"name": "Guido's All New House of Pasta",
"serves_gnocchi": False,
"serves_hot_dogs": False,
}
],
)
dicts = list(ParkingLot.objects.values("name", "capacity"))
self.assertEqual(
dicts,
[
{
"capacity": 50,
"name": "Derelict lot",
}
],
)
        # If you try to raw-save a parent attribute onto a child object,
        # the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(
ItalianRestaurant.objects.values(
"name", "serves_hot_dogs", "serves_gnocchi"
)
)
self.assertEqual(
dicts,
[
{
"name": "Guido's All New House of Pasta",
"serves_gnocchi": False,
"serves_hot_dogs": False,
}
],
)
def test_issue_7105(self):
        # Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name="child", created=datetime.datetime(2008, 6, 26, 17, 0, 0)
)
datetimes = list(Child.objects.datetimes("created", "month"))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(name="Guido's House of Pasta", address="944 W. Fullerton")
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False,
)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant, serves_gnocchi=True
)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name="a",
address="xx",
serves_hot_dogs=True,
serves_pizza=False,
)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
with self.assertRaises(Place.DoesNotExist):
Place.objects.get(pk=ident)
with self.assertRaises(ItalianRestaurant.DoesNotExist):
ItalianRestaurant.objects.get(pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline="ArticleWithAuthor 1",
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0),
)
c1.save()
c2 = ArticleWithAuthor(
headline="ArticleWithAuthor 2",
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0),
)
c2.save()
c3 = ArticleWithAuthor(
headline="ArticleWithAuthor 3",
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2),
)
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c3.get_next_by_pub_date()
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c1.get_previous_by_pub_date()
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by("pub_date", "pk")
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find("ORDER BY") :]
pos = fragment.find("pub_date")
self.assertEqual(fragment.find("pub_date", pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0),
)
update = ArticleWithAuthor.objects.filter(author="fred").update(
headline="Oh, no!"
)
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(pk=article.pk).update(
headline="Oh, no!"
)
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1",
)
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, "b1")
self.assertEqual(derivedm1.derived_name, "d1")
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
The connector from child to parent need not be the pk on the child.
"""
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(ParkingLot3._meta.get_ancestor_link(Place).name, "parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address="21 Jump Street",
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
        ParkingLot4B.objects.create(
name="Parking4B",
address="21 Jump Street",
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian",
)
def test_abstract_base_class_m2m_relation_inheritance(self):
# many-to-many relations defined on an abstract base class are
# correctly inherited (and created) on the child class.
p1 = Person.objects.create(name="Alice")
p2 = Person.objects.create(name="Bob")
p3 = Person.objects.create(name="Carol")
p4 = Person.objects.create(name="Dave")
birthday = BirthdayParty.objects.create(name="Birthday party for Alice")
birthday.attendees.set([p1, p3])
bachelor = BachelorParty.objects.create(name="Bachelor party for Bob")
bachelor.attendees.set([p2, p4])
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# A subclass of a subclass of an abstract model doesn't get its own
# accessor.
self.assertFalse(hasattr(p2, "messybachelorparty_set"))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(name="Bachelor party for Dave")
messy.attendees.set([p4])
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(InternalCertificationAudit._meta.verbose_name_plural, "Audits")
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [obj.pk], attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [obj.pk], attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len(
[field for field in BusStation._meta.local_fields if field.primary_key]
),
1,
)
self.assertEqual(
len(
[
field
for field in TrainStation._meta.local_fields
if field.primary_key
]
),
1,
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
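    # The assertions above guard ticket #13987: with a hierarchy like
    # (hypothetical names)
    #
    #   class Concrete1(models.Model): ...
    #   class AbstractMiddle(Concrete1):
    #       class Meta:
    #           abstract = True
    #   class Concrete2(AbstractMiddle): ...
    #
    # Concrete2 must end up with exactly one local primary-key field (the
    # auto-created pointer to Concrete1), not an extra one per model.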
def test_inherited_unique_field_with_form(self):
"""
        A model that uses a different primary key than its parent model
        passes unique-field checks correctly (#17615).
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = "__all__"
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm(
{"username": "user_with_profile", "extra": "hello"}, instance=p
)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
        # Test for #17502 - filtering through two levels of the inheritance
        # chain shouldn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count("JOIN"), 2)
qs = ItalianRestaurant.objects.filter(name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list("serves_gnocchi").filter(name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_issue_21554(self):
senator = Senator.objects.create(name="John Doe", title="X", state="Y")
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, "John Doe")
self.assertEqual(senator.title, "X")
self.assertEqual(senator.state, "Y")
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(
name="Bobs Cafe",
address="Somewhere",
serves_pizza=True,
serves_hot_dogs=True,
)
p = Place.objects.select_related("restaurant")[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(),
[
"Jane",
"John",
],
attrgetter("name"),
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
def test_filter_with_parent_fk(self):
r = Restaurant.objects.create()
s = Supplier.objects.create(restaurant=r)
# The mismatch between Restaurant and Place is intentional (#28175).
self.assertSequenceEqual(
Supplier.objects.filter(restaurant__in=Place.objects.all()), [s]
)
def test_ptr_accessor_assigns_state(self):
r = Restaurant.objects.create()
self.assertIs(r.place_ptr._state.adding, False)
self.assertEqual(r.place_ptr._state.db, "default")
def test_related_filtering_query_efficiency_ticket_15844(self):
r = Restaurant.objects.create(
name="Guido's House of Pasta",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
)
s = Supplier.objects.create(restaurant=r)
with self.assertNumQueries(1):
self.assertSequenceEqual(Supplier.objects.filter(restaurant=r), [s])
with self.assertNumQueries(1):
self.assertSequenceEqual(r.supplier_set.all(), [s])
def test_queries_on_parent_access(self):
italian_restaurant = ItalianRestaurant.objects.create(
name="Guido's House of Pasta",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
serves_gnocchi=True,
)
# No queries are made when accessing the parent objects.
italian_restaurant = ItalianRestaurant.objects.get(pk=italian_restaurant.pk)
with self.assertNumQueries(0):
restaurant = italian_restaurant.restaurant_ptr
self.assertEqual(restaurant.place_ptr.restaurant, restaurant)
self.assertEqual(restaurant.italianrestaurant, italian_restaurant)
# One query is made when accessing the parent objects when the instance
# is deferred.
italian_restaurant = ItalianRestaurant.objects.only("serves_gnocchi").get(
pk=italian_restaurant.pk
)
with self.assertNumQueries(1):
restaurant = italian_restaurant.restaurant_ptr
self.assertEqual(restaurant.place_ptr.restaurant, restaurant)
self.assertEqual(restaurant.italianrestaurant, italian_restaurant)
# No queries are made when accessing the parent objects when the
# instance has deferred a field not present in the parent table.
italian_restaurant = ItalianRestaurant.objects.defer("serves_gnocchi").get(
pk=italian_restaurant.pk
)
with self.assertNumQueries(0):
restaurant = italian_restaurant.restaurant_ptr
self.assertEqual(restaurant.place_ptr.restaurant, restaurant)
self.assertEqual(restaurant.italianrestaurant, italian_restaurant)
def test_id_field_update_on_ancestor_change(self):
place1 = Place.objects.create(name="House of Pasta", address="944 Fullerton")
place2 = Place.objects.create(name="House of Pizza", address="954 Fullerton")
place3 = Place.objects.create(name="Burger house", address="964 Fullerton")
restaurant1 = Restaurant.objects.create(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False,
)
restaurant2 = Restaurant.objects.create(
place_ptr=place2,
serves_hot_dogs=True,
serves_pizza=False,
)
italian_restaurant = ItalianRestaurant.objects.create(
restaurant_ptr=restaurant1,
serves_gnocchi=True,
)
# Changing the parent of a restaurant changes the restaurant's ID & PK.
restaurant1.place_ptr = place3
self.assertEqual(restaurant1.pk, place3.pk)
self.assertEqual(restaurant1.id, place3.id)
self.assertEqual(restaurant1.pk, restaurant1.id)
restaurant1.place_ptr = None
self.assertIsNone(restaurant1.pk)
self.assertIsNone(restaurant1.id)
# Changing the parent of an italian restaurant changes the restaurant's
# ID & PK.
italian_restaurant.restaurant_ptr = restaurant2
self.assertEqual(italian_restaurant.pk, restaurant2.pk)
self.assertEqual(italian_restaurant.id, restaurant2.id)
self.assertEqual(italian_restaurant.pk, italian_restaurant.id)
italian_restaurant.restaurant_ptr = None
self.assertIsNone(italian_restaurant.pk)
self.assertIsNone(italian_restaurant.id)
def test_create_new_instance_with_pk_equals_none(self):
p1 = Profile.objects.create(username="john")
p2 = User.objects.get(pk=p1.user_ptr_id).profile
# Create a new profile by setting pk = None.
p2.pk = None
p2.user_ptr_id = None
p2.username = "bill"
p2.save()
self.assertEqual(Profile.objects.count(), 2)
self.assertEqual(User.objects.get(pk=p1.user_ptr_id).username, "john")
def test_create_new_instance_with_pk_equals_none_multi_inheritance(self):
c1 = Congressman.objects.create(state="PA", name="John", title="senator 1")
c2 = Person.objects.get(pk=c1.pk).congressman
# Create a new congressman by setting pk = None.
c2.pk = None
c2.id = None
c2.politician_ptr_id = None
c2.name = "Bill"
c2.title = "senator 2"
c2.save()
self.assertEqual(Congressman.objects.count(), 2)
self.assertEqual(Person.objects.get(pk=c1.pk).name, "John")
self.assertEqual(
Politician.objects.get(pk=c1.politician_ptr_id).title,
"senator 1",
)
def test_mti_update_parent_through_child(self):
Politician.objects.create()
Congressman.objects.create()
Congressman.objects.update(title="senator 1")
self.assertEqual(Congressman.objects.get().title, "senator 1")
def test_mti_update_grand_parent_through_child(self):
Politician.objects.create()
Senator.objects.create()
Senator.objects.update(title="senator 1")
self.assertEqual(Senator.objects.get().title, "senator 1")
|
6497be1fc2a98f367b2fe4372003ab2031d3fcdc1874359defc6e20a62fe7bc0 |
from datetime import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db.models import (
CharField,
Count,
DateTimeField,
F,
Max,
OuterRef,
Subquery,
Value,
)
from django.db.models.functions import Upper
from django.test import TestCase
from .models import (
Article,
Author,
ChildArticle,
OrderedByExpression,
OrderedByExpressionChild,
OrderedByExpressionGrandChild,
OrderedByFArticle,
Reference,
)
class OrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26)
)
cls.a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27)
)
cls.a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27)
)
cls.a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28)
)
cls.author_1 = Author.objects.create(name="Name 1")
cls.author_2 = Author.objects.create(name="Name 2")
for i in range(2):
Author.objects.create()
def test_default_ordering(self):
"""
By default, Article.objects.all() orders by pub_date descending, then
headline ascending.
"""
self.assertQuerysetEqual(
Article.objects.all(),
[
"Article 4",
"Article 2",
"Article 3",
"Article 1",
],
attrgetter("headline"),
)
# Getting a single item should work too:
self.assertEqual(Article.objects.all()[0], self.a4)
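    # The default ordering asserted above implies a Meta along these lines
    # (inferred from the assertions; the actual model lives in .models):
    #
    #   class Article(models.Model):
    #       headline = models.CharField(max_length=100)
    #       pub_date = models.DateTimeField()
    #
    #       class Meta:
    #           ordering = ("-pub_date", "headline")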
def test_default_ordering_override(self):
"""
Override ordering with order_by, which is in the same format as the
ordering attribute in models.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline"),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.order_by("pub_date", "-headline"),
[
"Article 1",
"Article 3",
"Article 2",
"Article 4",
],
attrgetter("headline"),
)
def test_default_ordering_override_unknown_field(self):
"""
Attempts to override default ordering on related models with an unknown
field should result in an error.
"""
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"article, author, editor, editor_id, id, name"
)
with self.assertRaisesMessage(FieldError, msg):
list(Article.objects.order_by("author__unknown_field"))
def test_order_by_override(self):
"""
Only the last order_by has any effect (since they each override any
previous ordering).
"""
self.assertQuerysetEqual(
Article.objects.order_by("id"),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.order_by("id").order_by("-headline"),
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
def test_order_by_nulls_first_and_last(self):
msg = "nulls_first and nulls_last are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by(
F("author").desc(nulls_last=True, nulls_first=True)
)
def assertQuerysetEqualReversible(self, queryset, sequence):
self.assertSequenceEqual(queryset, sequence)
self.assertSequenceEqual(queryset.reverse(), list(reversed(sequence)))
def test_order_by_nulls_last(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_last.
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").desc(nulls_last=True), "headline"),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").asc(nulls_last=True), "headline"),
[self.a3, self.a4, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(
Upper("author__name").desc(nulls_last=True), "headline"
),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(
Upper("author__name").asc(nulls_last=True), "headline"
),
[self.a3, self.a4, self.a1, self.a2],
)
def test_order_by_nulls_first(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_first.
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").asc(nulls_first=True), "headline"),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").desc(nulls_first=True), "headline"),
[self.a1, self.a2, self.a4, self.a3],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(
Upper("author__name").asc(nulls_first=True), "headline"
),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(
Upper("author__name").desc(nulls_first=True), "headline"
),
[self.a1, self.a2, self.a4, self.a3],
)
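    # On backends with native support, asc(nulls_first=True) renders as
    # "ORDER BY ... ASC NULLS FIRST". Elsewhere Django emulates the modifier
    # with an extra ordering expression; a rough sketch of the emulated SQL
    # for the first assertion above:
    #
    #   ORDER BY "author_id" IS NOT NULL, "author_id" ASC, "headline" ASC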
def test_orders_nulls_first_on_filtered_subquery(self):
Article.objects.filter(headline="Article 1").update(author=self.author_1)
Article.objects.filter(headline="Article 2").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
Author.objects.filter(name__isnull=True).delete()
author_3 = Author.objects.create(name="Name 3")
article_subquery = (
Article.objects.filter(
author=OuterRef("pk"),
headline__icontains="Article",
)
.order_by()
.values("author")
.annotate(
last_date=Max("pub_date"),
)
.values("last_date")
)
self.assertQuerysetEqualReversible(
Author.objects.annotate(
last_date=Subquery(article_subquery, output_field=DateTimeField())
)
.order_by(F("last_date").asc(nulls_first=True))
.distinct(),
[author_3, self.author_1, self.author_2],
)
def test_stop_slicing(self):
"""
Use the 'stop' part of slicing notation to limit the results.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[:2],
[
"Article 1",
"Article 2",
],
attrgetter("headline"),
)
def test_stop_start_slicing(self):
"""
Use the 'stop' and 'start' parts of slicing notation to offset the
result list.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[1:3],
[
"Article 2",
"Article 3",
],
attrgetter("headline"),
)
def test_random_ordering(self):
"""
Use '?' to order randomly.
"""
self.assertEqual(len(list(Article.objects.order_by("?"))), 4)
def test_reversed_ordering(self):
"""
Ordering can be reversed using the reverse() method on a queryset.
This allows you to extract things like "the last two items" (reverse
and then take the first two).
"""
self.assertQuerysetEqual(
Article.objects.reverse()[:2],
[
"Article 1",
"Article 3",
],
attrgetter("headline"),
)
def test_reverse_ordering_pure(self):
qs1 = Article.objects.order_by(F("headline").asc())
qs2 = qs1.reverse()
self.assertQuerysetEqual(
qs2,
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
qs1,
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
def test_reverse_meta_ordering_pure(self):
Article.objects.create(
headline="Article 5",
pub_date=datetime(2005, 7, 30),
author=self.author_1,
second_author=self.author_2,
)
Article.objects.create(
headline="Article 5",
pub_date=datetime(2005, 7, 30),
author=self.author_2,
second_author=self.author_1,
)
self.assertQuerysetEqual(
Article.objects.filter(headline="Article 5").reverse(),
["Name 2", "Name 1"],
attrgetter("author.name"),
)
self.assertQuerysetEqual(
Article.objects.filter(headline="Article 5"),
["Name 1", "Name 2"],
attrgetter("author.name"),
)
def test_no_reordering_after_slicing(self):
msg = "Cannot reverse a query once a slice has been taken."
qs = Article.objects.all()[0:2]
with self.assertRaisesMessage(TypeError, msg):
qs.reverse()
with self.assertRaisesMessage(TypeError, msg):
qs.last()
def test_extra_ordering(self):
"""
Ordering can be based on fields included from an 'extra' clause
"""
self.assertQuerysetEqual(
Article.objects.extra(
select={"foo": "pub_date"}, order_by=["foo", "headline"]
),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
def test_extra_ordering_quoting(self):
"""
If the extra clause uses an SQL keyword for a name, it will be
protected by quoting.
"""
self.assertQuerysetEqual(
Article.objects.extra(
select={"order": "pub_date"}, order_by=["order", "headline"]
),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
def test_extra_ordering_with_table_name(self):
self.assertQuerysetEqual(
Article.objects.extra(order_by=["ordering_article.headline"]),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.extra(order_by=["-ordering_article.headline"]),
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
def test_order_by_pk(self):
"""
'pk' works as an ordering option in Meta.
"""
self.assertEqual(
[a.pk for a in Author.objects.all()],
[a.pk for a in Author.objects.order_by("-pk")],
)
def test_order_by_fk_attname(self):
"""
        Ordering by a foreign key's attribute name (attname) prevents the
        query from inheriting the related model's ordering option (#19195).
"""
authors = list(Author.objects.order_by("id"))
for i in range(1, 5):
author = authors[i - 1]
article = getattr(self, "a%d" % (5 - i))
article.author = author
article.save(update_fields={"author"})
self.assertQuerysetEqual(
Article.objects.order_by("author_id"),
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
def test_order_by_self_referential_fk(self):
self.a1.author = Author.objects.create(editor=self.author_1)
self.a1.save()
self.a2.author = Author.objects.create(editor=self.author_2)
self.a2.save()
self.assertQuerysetEqual(
Article.objects.filter(author__isnull=False).order_by("author__editor"),
["Article 2", "Article 1"],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(author__isnull=False).order_by("author__editor_id"),
["Article 1", "Article 2"],
attrgetter("headline"),
)
def test_order_by_f_expression(self):
self.assertQuerysetEqual(
Article.objects.order_by(F("headline")),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.order_by(F("headline").asc()),
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.order_by(F("headline").desc()),
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
def test_order_by_f_expression_duplicates(self):
"""
        A column may only be included once (its first occurrence), so the
        generated SQL is inspected to ensure there are no duplicates.
"""
qs = Article.objects.order_by(F("headline").asc(), F("headline").desc())
sql = str(qs.query).upper()
fragment = sql[sql.find("ORDER BY") :]
self.assertEqual(fragment.count("HEADLINE"), 1)
self.assertQuerysetEqual(
qs,
[
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline"),
)
qs = Article.objects.order_by(F("headline").desc(), F("headline").asc())
sql = str(qs.query).upper()
fragment = sql[sql.find("ORDER BY") :]
self.assertEqual(fragment.count("HEADLINE"), 1)
self.assertQuerysetEqual(
qs,
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline"),
)
def test_order_by_constant_value(self):
# Order by annotated constant from selected columns.
qs = Article.objects.annotate(
constant=Value("1", output_field=CharField()),
).order_by("constant", "-headline")
self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1])
# Order by annotated constant which is out of selected columns.
self.assertSequenceEqual(
qs.values_list("headline", flat=True),
[
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
)
# Order by constant.
qs = Article.objects.order_by(Value("1", output_field=CharField()), "-headline")
self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1])
def test_related_ordering_duplicate_table_reference(self):
"""
        An ordering that goes through a model whose own ordering references
        another model multiple times must not be misdetected as a circular
        reference (#24654).
"""
first_author = Author.objects.create()
second_author = Author.objects.create()
self.a1.author = first_author
self.a1.second_author = second_author
self.a1.save()
self.a2.author = second_author
self.a2.second_author = first_author
self.a2.save()
r1 = Reference.objects.create(article_id=self.a1.pk)
r2 = Reference.objects.create(article_id=self.a2.pk)
self.assertSequenceEqual(Reference.objects.all(), [r2, r1])
def test_default_ordering_by_f_expression(self):
"""F expressions can be used in Meta.ordering."""
articles = OrderedByFArticle.objects.all()
articles.filter(headline="Article 2").update(author=self.author_2)
articles.filter(headline="Article 3").update(author=self.author_1)
self.assertQuerysetEqual(
articles,
["Article 1", "Article 4", "Article 3", "Article 2"],
attrgetter("headline"),
)
def test_order_by_ptr_field_with_default_ordering_by_expression(self):
ca1 = ChildArticle.objects.create(
headline="h2",
pub_date=datetime(2005, 7, 27),
author=self.author_2,
)
ca2 = ChildArticle.objects.create(
headline="h2",
pub_date=datetime(2005, 7, 27),
author=self.author_1,
)
ca3 = ChildArticle.objects.create(
headline="h3",
pub_date=datetime(2005, 7, 27),
author=self.author_1,
)
ca4 = ChildArticle.objects.create(headline="h1", pub_date=datetime(2005, 7, 28))
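        # Ordering by the parent link pointer follows Article's default
        # expression-based Meta.ordering rather than the parent's pk.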
articles = ChildArticle.objects.order_by("article_ptr")
self.assertSequenceEqual(articles, [ca4, ca2, ca1, ca3])
def test_default_ordering_does_not_affect_group_by(self):
Article.objects.exclude(headline="Article 4").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
articles = Article.objects.values("author").annotate(count=Count("author"))
self.assertCountEqual(
articles,
[
{"author": self.author_1.pk, "count": 3},
{"author": self.author_2.pk, "count": 1},
],
)
def test_order_by_parent_fk_with_expression_in_default_ordering(self):
p3 = OrderedByExpression.objects.create(name="oBJ 3")
p2 = OrderedByExpression.objects.create(name="OBJ 2")
p1 = OrderedByExpression.objects.create(name="obj 1")
c3 = OrderedByExpressionChild.objects.create(parent=p3)
c2 = OrderedByExpressionChild.objects.create(parent=p2)
c1 = OrderedByExpressionChild.objects.create(parent=p1)
self.assertSequenceEqual(
OrderedByExpressionChild.objects.order_by("parent"),
[c1, c2, c3],
)
def test_order_by_grandparent_fk_with_expression_in_default_ordering(self):
p3 = OrderedByExpression.objects.create(name="oBJ 3")
p2 = OrderedByExpression.objects.create(name="OBJ 2")
p1 = OrderedByExpression.objects.create(name="obj 1")
c3 = OrderedByExpressionChild.objects.create(parent=p3)
c2 = OrderedByExpressionChild.objects.create(parent=p2)
c1 = OrderedByExpressionChild.objects.create(parent=p1)
g3 = OrderedByExpressionGrandChild.objects.create(parent=c3)
g2 = OrderedByExpressionGrandChild.objects.create(parent=c2)
g1 = OrderedByExpressionGrandChild.objects.create(parent=c1)
self.assertSequenceEqual(
OrderedByExpressionGrandChild.objects.order_by("parent"),
[g1, g2, g3],
)
import datetime
import pickle
import unittest
import uuid
from collections import namedtuple
from copy import deepcopy
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import (
AutoField,
Avg,
BinaryField,
BooleanField,
Case,
CharField,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
Expression,
ExpressionList,
ExpressionWrapper,
F,
FloatField,
Func,
IntegerField,
Max,
Min,
Model,
OrderBy,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
UUIDField,
Value,
Variance,
When,
)
from django.db.models.expressions import (
Col,
Combinable,
CombinedExpression,
RawSQL,
Ref,
)
from django.db.models.functions import (
Coalesce,
Concat,
Left,
Length,
Lower,
Substr,
Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import (
Approximate,
CaptureQueriesContext,
isolate_apps,
register_lookup,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import SimpleLazyObject
from .models import (
UUID,
UUIDPK,
Company,
Employee,
Experiment,
Manager,
Number,
RemoteEmployee,
Result,
SimulationRun,
Time,
)
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.example_inc = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10),
)
cls.foobar_ltd = Company.objects.create(
name="Foobar Ltd.",
num_employees=3,
num_chairs=4,
based_in_eu=True,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20),
)
cls.max = Employee.objects.create(
firstname="Max", lastname="Mustermann", salary=30
)
cls.gmbh = Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1, ceo=cls.max
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by("name", "num_employees", "num_chairs")
def test_annotate_values_aggregate(self):
companies = (
Company.objects.annotate(
salaries=F("ceo__salary"),
)
.values("num_employees", "salaries")
.aggregate(
result=Sum(
F("salaries") + F("num_employees"), output_field=IntegerField()
),
)
)
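        # Sum of (ceo salary + num_employees) over all three companies:
        # (10 + 2300) + (20 + 3) + (30 + 32) == 2395.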
self.assertEqual(companies["result"], 2395)
def test_annotate_values_filter(self):
companies = (
Company.objects.annotate(
foo=RawSQL("%s", ["value"]),
)
.filter(foo="value")
.order_by("name")
)
self.assertSequenceEqual(
companies,
[self.example_inc, self.foobar_ltd, self.gmbh],
)
def test_annotate_values_count(self):
companies = Company.objects.annotate(foo=RawSQL("%s", ["value"]))
self.assertEqual(companies.count(), 3)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_filtering_on_annotate_that_uses_q(self):
self.assertEqual(
Company.objects.annotate(
num_employees_check=ExpressionWrapper(
Q(num_employees__gt=3), output_field=BooleanField()
)
)
.filter(num_employees_check=True)
.count(),
2,
)
def test_filtering_on_q_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
).count(),
2,
)
def test_filtering_on_rawsql_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
RawSQL("num_employees > %s", (3,), output_field=BooleanField()),
).count(),
2,
)
def test_filter_inter_attribute(self):
        # We can filter on attribute relationships on the same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertSequenceEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")),
[
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
],
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2302, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 5, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 34, "name": "Test GmbH", "num_employees": 32},
],
)
def test_order_of_operations(self):
        # The standard order of operations (operator precedence) is followed.
self.company_query.update(
num_chairs=F("num_employees") + 2 * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 6900, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 9, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 96, "name": "Test GmbH", "num_employees": 32},
],
)
def test_parenthesis_priority(self):
        # Operator precedence can be overridden with parentheses.
self.company_query.update(
num_chairs=(F("num_employees") + 2) * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 5294600, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 15, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 1088, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(Company.objects.update(point_of_contact=F("ceo")), 3)
self.assertQuerysetEqual(
Company.objects.all(),
["Joe Smith", "Frank Meyer", "Max Mustermann"],
lambda c: str(c.point_of_contact),
ordered=False,
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [None, None], lambda n: n.float, ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F("ceo"))
c = Company.objects.first()
c.point_of_contact = Employee.objects.create(
firstname="Guido", lastname="van Rossum"
)
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")),
["Foobar Ltd.", "Test GmbH"],
lambda c: c.name,
ordered=False,
)
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")).update(
name="foo"
)
self.assertEqual(
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname"))
.get()
.name,
"foo",
)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name=F("point_of_contact__lastname"))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
self.gmbh.num_employees = F("num_employees") + 4
self.gmbh.save()
self.gmbh.refresh_from_db()
self.assertEqual(self.gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
msg = (
"Aggregate functions are not allowed in this query "
"(num_employees=Max(Value(1)))."
)
with self.assertRaisesMessage(FieldError, msg):
Company.objects.create(
name="Company",
num_employees=Max(Value(1)),
num_chairs=1,
ceo=Employee.objects.create(
firstname="Just", lastname="Doit", salary=30
),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(pk=self.gmbh.pk)
msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
with self.assertRaisesMessage(ValueError, msg):
test_gmbh.point_of_contact = F("ceo")
test_gmbh.point_of_contact = self.gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__lastname")
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
test_gmbh.save()
def test_update_inherited_field_value(self):
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
RemoteEmployee.objects.update(adjusted_salary=F("salary") * 5)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5, ceo=self.max
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
"expressions.Company.num_employees. F() expressions can only be "
"used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F("name"))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
"expressions can only be used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
test = Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F("lastname"))
self.assertSequenceEqual(queryset, [test])
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertSequenceEqual(
Employee.objects.filter(lastname__startswith=F("firstname")),
[e2, e3] if connection.features.has_case_insensitive_like else [e2],
)
qs = Employee.objects.filter(lastname__istartswith=F("firstname")).order_by(
"pk"
)
self.assertSequenceEqual(qs, [e2, e3])
def test_ticket_18375_join_reuse(self):
# Reverse multijoin F() references and the lookup target the same join.
# Pre #18375 the F() join was generated first and the lookup couldn't
# reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() reused the join generated by the gte
        # lookup; if the F() was seen first, it generated a join the other
        # lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees"),
company_ceo_set__num_chairs__gte=1,
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case, similar to the above, for F(). Now the same join
        # appears in two filter kwargs: once in the lhs lookup and once in
        # the F(). Pre #18375 the number of joins generated was random when
        # dict randomization was enabled, i.e. the generated query depended
        # on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F("pk"),
pk=F("company_ceo_set__num_employees"),
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(company_ceo_set__num_employees=F("pk")).filter(
company_ceo_set__num_employees=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_order_by_exists(self):
mary = Employee.objects.create(
firstname="Mary", lastname="Mustermann", salary=20
)
mustermanns_by_seniority = Employee.objects.filter(
lastname="Mustermann"
).order_by(
# Order by whether the employee is the CEO of a company
Exists(Company.objects.filter(ceo=OuterRef("pk"))).desc()
)
self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])
def test_order_by_multiline_sql(self):
raw_order_by = (
RawSQL(
"""
CASE WHEN num_employees > 1000
THEN num_chairs
ELSE 0 END
""",
[],
).desc(),
RawSQL(
"""
CASE WHEN num_chairs > 1
THEN 1
ELSE 0 END
""",
[],
).asc(),
)
for qs in (
Company.objects.all(),
Company.objects.distinct(),
):
with self.subTest(qs=qs):
self.assertSequenceEqual(
qs.order_by(*raw_order_by),
[self.example_inc, self.gmbh, self.foobar_ltd],
)
def test_outerref(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
msg = (
"This queryset contains a reference to an outer query and may only "
"be used in a subquery."
)
with self.assertRaisesMessage(ValueError, msg):
inner.exists()
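        # Once wrapped in Exists() and used on an outer queryset, the
        # OuterRef resolves and the query can be executed.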
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
self.assertIs(outer.exists(), True)
def test_exist_single_field_output_field(self):
queryset = Company.objects.values("pk")
self.assertIsInstance(Exists(queryset).output_field, BooleanField)
def test_subquery(self):
Company.objects.filter(name="Example Inc.").update(
point_of_contact=Employee.objects.get(firstname="Joe", lastname="Smith"),
ceo=self.max,
)
Employee.objects.create(firstname="Bob", lastname="Brown", salary=40)
qs = (
Employee.objects.annotate(
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_not_point_of_contact=~Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_ceo_of_small_company=Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
is_ceo_small_2=~~Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
largest_company=Subquery(
Company.objects.order_by("-num_employees")
.filter(Q(ceo=OuterRef("pk")) | Q(point_of_contact=OuterRef("pk")))
.values("name")[:1],
output_field=CharField(),
),
)
.values(
"firstname",
"is_point_of_contact",
"is_not_point_of_contact",
"is_ceo_of_small_company",
"is_ceo_small_2",
"largest_company",
)
.order_by("firstname")
)
results = list(qs)
# Could use Coalesce(subq, Value('')) instead except for the bug in
# cx_Oracle mentioned in #23843.
bob = results[0]
if (
bob["largest_company"] == ""
and connection.features.interprets_empty_strings_as_nulls
):
bob["largest_company"] = None
self.assertEqual(
results,
[
{
"firstname": "Bob",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": None,
},
{
"firstname": "Frank",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Foobar Ltd.",
},
{
"firstname": "Joe",
"is_point_of_contact": True,
"is_not_point_of_contact": False,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": "Example Inc.",
},
{
"firstname": "Max",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Example Inc.",
},
],
)
# A less elegant way to write the same query: this uses a LEFT OUTER
# JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
# efficient than EXISTS.
self.assertCountEqual(
qs.filter(is_point_of_contact=True).values("pk"),
Employee.objects.exclude(company_point_of_contact_set=None).values("pk"),
)
def test_subquery_eq(self):
qs = Employee.objects.annotate(
is_ceo=Exists(Company.objects.filter(ceo=OuterRef("pk"))),
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk")),
),
small_company=Exists(
queryset=Company.objects.filter(num_employees__lt=200),
),
).filter(is_ceo=True, is_point_of_contact=False, small_company=True)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["is_point_of_contact"],
)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["small_company"],
)
def test_subquery_sql(self):
employees = Employee.objects.all()
employees_subquery = Subquery(employees)
self.assertIs(employees_subquery.query.subquery, True)
self.assertIs(employees.query.subquery, False)
compiler = employees_subquery.query.get_compiler(connection=connection)
sql, _ = employees_subquery.as_sql(compiler, connection)
self.assertIn("(SELECT ", sql)
def test_in_subquery(self):
# This is a contrived test (and you really wouldn't write this query),
# but it is a succinct way to test the __in=Subquery() construct.
small_companies = Company.objects.filter(num_employees__lt=200).values("pk")
subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
subquery_test2 = Company.objects.filter(
pk=Subquery(small_companies.filter(num_employees=3))
)
self.assertCountEqual(subquery_test2, [self.foobar_ltd])
def test_uuid_pk_subquery(self):
u = UUIDPK.objects.create()
UUID.objects.create(uuid_fk=u)
qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values("uuid_fk__id")))
self.assertCountEqual(qs, [u])
def test_nested_subquery(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
contrived = Employee.objects.annotate(
is_point_of_contact=Subquery(
outer.filter(pk=OuterRef("pk")).values("is_point_of_contact"),
output_field=BooleanField(),
),
)
self.assertCountEqual(contrived.values_list(), outer.values_list())
def test_nested_subquery_join_outer_ref(self):
inner = Employee.objects.filter(pk=OuterRef("ceo__pk")).values("pk")
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
ceo__in=inner,
ceo__pk=OuterRef("pk"),
).values("pk"),
),
)
self.assertSequenceEqual(
qs.values_list("ceo_company", flat=True),
[self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk],
)
def test_nested_subquery_outer_ref_2(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
third = Time.objects.create(time="21:00")
SimulationRun.objects.bulk_create(
[
SimulationRun(start=first, end=second, midpoint="12:00"),
SimulationRun(start=first, end=third, midpoint="15:00"),
SimulationRun(start=second, end=first, midpoint="00:00"),
]
)
inner = Time.objects.filter(
time=OuterRef(OuterRef("time")), pk=OuterRef("start")
).values("time")
middle = SimulationRun.objects.annotate(other=Subquery(inner)).values("other")[
:1
]
outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField()))
# This is a contrived example. It exercises the double OuterRef form.
self.assertCountEqual(outer, [first, second, third])
def test_nested_subquery_outer_ref_with_autofield(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
SimulationRun.objects.create(start=first, end=second, midpoint="12:00")
inner = SimulationRun.objects.filter(start=OuterRef(OuterRef("pk"))).values(
"start"
)
middle = Time.objects.annotate(other=Subquery(inner)).values("other")[:1]
outer = Time.objects.annotate(
other=Subquery(middle, output_field=IntegerField())
)
# This exercises the double OuterRef form with AutoField as pk.
self.assertCountEqual(outer, [first, second])
def test_annotations_within_subquery(self):
Company.objects.filter(num_employees__lt=50).update(
ceo=Employee.objects.get(firstname="Frank")
)
inner = (
Company.objects.filter(ceo=OuterRef("pk"))
.values("ceo")
.annotate(total_employees=Sum("num_employees"))
.values("total_employees")
)
outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(
salary__lte=Subquery(inner)
)
self.assertSequenceEqual(
outer.order_by("-total_employees").values("salary", "total_employees"),
[
{"salary": 10, "total_employees": 2300},
{"salary": 20, "total_employees": 35},
],
)
def test_subquery_references_joined_table_twice(self):
inner = Company.objects.filter(
num_chairs__gte=OuterRef("ceo__salary"),
num_employees__gte=OuterRef("point_of_contact__salary"),
)
# Another contrived example (there is no need to have a subquery here)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_subquery_filter_by_aggregate(self):
Number.objects.create(integer=1000, float=1.2)
Employee.objects.create(salary=1000)
qs = Number.objects.annotate(
min_valuable_count=Subquery(
Employee.objects.filter(
salary=OuterRef("integer"),
)
.annotate(cnt=Count("salary"))
.filter(cnt__gt=0)
.values("cnt")[:1]
),
)
self.assertEqual(qs.get().float, 1.2)
def test_subquery_filter_by_lazy(self):
self.max.manager = Manager.objects.create(name="Manager")
self.max.save()
max_manager = SimpleLazyObject(
lambda: Manager.objects.get(pk=self.max.manager.pk)
)
qs = Company.objects.annotate(
ceo_manager=Subquery(
Employee.objects.filter(
lastname=OuterRef("ceo__lastname"),
).values("manager"),
),
).filter(ceo_manager=max_manager)
self.assertEqual(qs.get(), self.gmbh)
def test_aggregate_subquery_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
ceo_salary=Subquery(
Employee.objects.filter(
id=OuterRef("ceo_id"),
).values("salary")
),
).aggregate(
ceo_salary_gt_20=Count("pk", filter=Q(ceo_salary__gt=20)),
)
self.assertEqual(aggregate, {"ceo_salary_gt_20": 1})
# Aggregation over a subquery annotation doesn't annotate the subquery
# twice in the inner query.
sql = ctx.captured_queries[0]["sql"]
self.assertLessEqual(sql.count("SELECT"), 3)
# GROUP BY isn't required to aggregate over a query that doesn't
# contain nested aggregates.
self.assertNotIn("GROUP BY", sql)
@skipUnlessDBFeature("supports_over_clause")
def test_aggregate_rawsql_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
salary=RawSQL("SUM(num_chairs) OVER (ORDER BY num_employees)", []),
).aggregate(
count=Count("pk"),
)
self.assertEqual(aggregate, {"count": 3})
sql = ctx.captured_queries[0]["sql"]
self.assertNotIn("GROUP BY", sql)
def test_explicit_output_field(self):
class FuncA(Func):
output_field = CharField()
class FuncB(Func):
pass
expr = FuncB(FuncA())
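        # output_field is resolved from the source expression when the outer
        # Func doesn't define one explicitly.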
self.assertEqual(expr.output_field, FuncA.output_field)
def test_outerref_mixed_case_table_name(self):
inner = Result.objects.filter(result_time__gte=OuterRef("experiment__assigned"))
outer = Result.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_outerref_with_operator(self):
inner = Company.objects.filter(num_employees=OuterRef("ceo__salary") + 2)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertEqual(outer.get().name, "Test GmbH")
def test_nested_outerref_with_function(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.filter(
lastname__startswith=Left(OuterRef(OuterRef("lastname")), 1),
)
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_annotation_with_outerref(self):
gmbh_salary = Company.objects.annotate(
max_ceo_salary_raise=Subquery(
Company.objects.annotate(
salary_raise=OuterRef("num_employees") + F("num_employees"),
)
.order_by("-salary_raise")
.values("salary_raise")[:1],
output_field=IntegerField(),
),
).get(pk=self.gmbh.pk)
self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)
def test_annotation_with_nested_outerref(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.annotate(
outer_lastname=OuterRef(OuterRef("lastname")),
).filter(lastname__startswith=Left("outer_lastname", 1))
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_pickle_expression(self):
expr = Value(1)
expr.convert_value # populate cached property
self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)
def test_incorrect_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Employee.objects.filter(firstname=F("nope")))
def test_incorrect_joined_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Company.objects.filter(ceo__pk=F("point_of_contact__nope")))
def test_exists_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("pk")
qs1 = Employee.objects.filter(Exists(inner))
qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True)
self.assertCountEqual(qs1, qs2)
self.assertFalse(Employee.objects.exclude(Exists(inner)).exists())
self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner)))
def test_subquery_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("based_in_eu")
self.assertSequenceEqual(
Employee.objects.filter(Subquery(inner)),
[self.foobar_ltd.ceo],
)
def test_subquery_group_by_outerref_in_filter(self):
inner = (
Company.objects.annotate(
employee=OuterRef("pk"),
)
.values("employee")
.annotate(
min_num_chairs=Min("num_chairs"),
)
.values("ceo")
)
self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True)
def test_case_in_filter_if_boolean_output_field(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
qs = Employee.objects.filter(
Case(
When(Exists(is_ceo), then=True),
When(Exists(is_poc), then=True),
default=False,
output_field=BooleanField(),
),
)
self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max])
def test_boolean_expression_combined(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)),
[self.example_inc.ceo, self.foobar_ltd.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),
[self.example_inc.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)),
[self.example_inc.ceo, self.max],
)
def test_boolean_expression_combined_with_empty_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
tests = [
Exists(is_poc) & Q(),
Q() & Exists(is_poc),
Exists(is_poc) | Q(),
Q() | Exists(is_poc),
Q(Exists(is_poc)) & Q(),
Q() & Q(Exists(is_poc)),
Q(Exists(is_poc)) | Q(),
Q() | Q(Exists(is_poc)),
]
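        # Combining with an empty Q() must leave the Exists() condition intact.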
for conditions in tests:
with self.subTest(conditions):
self.assertCountEqual(Employee.objects.filter(conditions), [self.max])
def test_boolean_expression_in_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(Employee.objects.filter(Q(Exists(is_poc))), [self.max])
class IterableLookupInnerExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30)
# MySQL requires that the values calculated for expressions don't pass
# outside of the field's range, so it's inconvenient to use the values
# in the more general tests.
cls.c5020 = Company.objects.create(
name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo
)
cls.c5040 = Company.objects.create(
name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo
)
cls.c5050 = Company.objects.create(
name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo
)
cls.c5060 = Company.objects.create(
name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo
)
cls.c99300 = Company.objects.create(
name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo
)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10]))
self.assertSequenceEqual(queryset, [self.c5060])
self.assertCountEqual(
Company.objects.filter(
num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10])
),
[self.c5040, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__in=(
[F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10]
)
),
[self.c5040, self.c5050, self.c5060],
)
def test_expressions_in_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertSequenceEqual(queryset, [s1])
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertQuerysetEqual(queryset, [], ordered=False)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
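        # Merely building this lazy queryset (it is never evaluated) is a
        # smoke test that __exact accepts an F() expression.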
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs"), 100)),
[self.c5020, self.c5040, self.c5050],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10)
),
[self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060, self.c99300],
)
def test_range_lookup_namedtuple(self):
EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"])
qs = Company.objects.filter(
num_employees__range=EmployeeRange(minimum=51, maximum=100),
)
self.assertSequenceEqual(qs, [self.c99300])
@unittest.skipUnless(
connection.vendor == "sqlite",
"This defensive test only works on databases that don't validate parameter "
"types",
)
def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion(
self,
):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"])
self.assertQuerysetEqual(queryset, [], ordered=False)
def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name="Integrity testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name="Taste testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
r1 = Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
within_experiment_time = [F("experiment__start"), F("experiment__end")]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertSequenceEqual(queryset, [r1])
class FTests(SimpleTestCase):
def test_deepcopy(self):
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_deconstruct(self):
f = F("name")
path, args, kwargs = f.deconstruct()
self.assertEqual(path, "django.db.models.F")
self.assertEqual(args, (f.name,))
self.assertEqual(kwargs, {})
def test_equal(self):
f = F("name")
same_f = F("name")
other_f = F("username")
self.assertEqual(f, same_f)
self.assertNotEqual(f, other_f)
def test_hash(self):
d = {F("name"): "Bob"}
self.assertIn(F("name"), d)
self.assertEqual(d[F("name")], "Bob")
def test_not_equal_Value(self):
f = F("name")
value = Value("name")
self.assertNotEqual(f, value)
self.assertNotEqual(value, f)
class ExpressionsTests(TestCase):
def test_F_reuse(self):
f = F("id")
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith"),
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a pattern lookup with an expression --
        refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude")
john = Employee.objects.create(firstname="Johnny", lastname="John")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__contains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__startswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__endswith=F("lastname")),
[claude],
)
def test_insensitive_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a case-insensitive pattern lookup with an
        expression -- refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude")
john = Employee.objects.create(firstname="Johnny", lastname="john")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__icontains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__istartswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__iendswith=F("lastname")),
[claude],
)
@isolate_apps("expressions")
class SimpleExpressionTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(Expression(), Expression())
self.assertEqual(
Expression(IntegerField()), Expression(output_field=IntegerField())
)
self.assertEqual(Expression(IntegerField()), mock.ANY)
self.assertNotEqual(Expression(IntegerField()), Expression(CharField()))
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
Expression(TestModel._meta.get_field("field")),
Expression(TestModel._meta.get_field("other_field")),
)
def test_hash(self):
self.assertEqual(hash(Expression()), hash(Expression()))
self.assertEqual(
hash(Expression(IntegerField())),
hash(Expression(output_field=IntegerField())),
)
self.assertNotEqual(
hash(Expression(IntegerField())),
hash(Expression(CharField())),
)
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
hash(Expression(TestModel._meta.get_field("field"))),
hash(Expression(TestModel._meta.get_field("other_field"))),
)
class ExpressionsNumericTests(TestCase):
@classmethod
def setUpTestData(cls):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
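        # Copy each integer into the float field so both start out equal.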
Number.objects.update(float=F("integer"))
def test_fill_with_value_from_same_object(self):
"""
        Every object's field can be filled with the value of another field on
        the same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (42, 42), (1337, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a field's value does not equal the
        value of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.exclude(float=F("integer")),
[(43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_decimal_expression(self):
obj = Number.objects.create(integer=0, float=1, decimal_value=Decimal("1"))
qs = Number.objects.annotate(
x=ExpressionWrapper(Value(1), output_field=DecimalField()),
).filter(Q(x=1, integer=0) & Q(x=Decimal("1")))
self.assertSequenceEqual(qs, [obj])
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(
Number.objects.filter(pk=n.pk).update(float=F("integer") + F("float") * 2),
1,
)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(
Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)
)
def test_decimal_expression(self):
n = Number.objects.create(integer=1, decimal_value=Decimal("0.5"))
n.decimal_value = F("decimal_value") - Decimal("0.4")
n.save()
n.refresh_from_db()
self.assertEqual(n.decimal_value, Decimal("0.1"))
class ExpressionOperatorTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n = Number.objects.create(integer=42, float=15.5)
cls.n1 = Number.objects.create(integer=-42, float=-15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") + 15, float=F("float") + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") - 15, float=F("float") - 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)
)
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") * 15, float=F("float") * 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") / 2, float=F("float") / 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)
)
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
def test_lefthand_modulo_null(self):
# LH Modulo arithmetic on integers.
Employee.objects.create(firstname="John", lastname="Doe", salary=None)
qs = Employee.objects.annotate(modsalary=F("salary") % 20)
        self.assertIsNone(qs.get().modsalary)
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56))
Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56))
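        # 42 & 56 == 40; with two's complement semantics, -42 & -56 == -64.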
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
def test_lefthand_bitwise_left_shift_operator(self):
Number.objects.update(integer=F("integer").bitleftshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
def test_lefthand_bitwise_right_shift_operator(self):
Number.objects.update(integer=F("integer").bitrightshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.update(integer=F("integer").bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
def test_lefthand_transformed_field_bitwise_or(self):
Employee.objects.create(firstname="Max", lastname="Mustermann")
with register_lookup(CharField, Length):
qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48))
self.assertEqual(qs.get().bitor, 58)
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") ** 2, float=F("float") ** 1.5
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)
)
def test_lefthand_bitwise_xor(self):
Number.objects.update(integer=F("integer").bitxor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26)
def test_lefthand_bitwise_xor_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.update(salary=F("salary").bitxor(48))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
def test_lefthand_bitwise_xor_right_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48)
Employee.objects.update(salary=F("salary").bitxor(None))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle doesn't support bitwise XOR."
)
def test_lefthand_bitwise_xor_not_supported(self):
msg = "Bitwise XOR is not supported in Oracle."
with self.assertRaisesMessage(NotSupportedError, msg):
Number.objects.update(integer=F("integer").bitxor(48))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(
integer=15 + F("integer"), float=42.7 + F("float")
)
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(
integer=15 - F("integer"), float=42.7 - F("float")
)
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)
)
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=15 * F("integer"), float=42.7 * F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=640 / F("integer"), float=42.7 / F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)
)
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer"))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=2 ** F("integer"), float=1.5 ** F("float")
)
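        # 2 ** 42 == 4398046511104 and 1.5 ** 15.5 is roughly 536.308.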
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)
)
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
delta5 = datetime.timedelta(days=90)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
cls.e0 = Experiment.objects.create(
name="e0",
assigned=sday,
start=stime,
end=end,
completed=end.date(),
estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(
cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)
)
cls.days_long.append(cls.e0.completed - cls.e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite.
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name="e1",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name="e2",
assigned=sday - datetime.timedelta(3),
start=stime,
end=end,
completed=end.date(),
estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name="e3",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name="e4",
assigned=sday - datetime.timedelta(10),
start=stime,
end=end,
completed=end.date(),
estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
# e5: started a month after assignment, very long duration
delay = datetime.timedelta(30)
end = stime + delay + delta5
e5 = Experiment.objects.create(
name="e5",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta5,
)
cls.deltas.append(delta5)
cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
cls.days_long.append(e5.completed - e5.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(
end__lt=F("start") + datetime.timedelta(hours=1)
)
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F("start") + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
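        # Deltas are strictly increasing (see setUpTestData), so each delta
        # admits exactly the first i experiments.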
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lt=delta + F("start"))
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_subtract(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(start__gt=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__gte=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_exclude(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.exclude(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i:])
test_set = [
e.name for e in Experiment.objects.exclude(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i + 1 :])
def test_date_comparison(self):
for i, days in enumerate(self.days_long):
test_set = [
e.name
for e in Experiment.objects.filter(completed__lt=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(completed__lte=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_datetime_and_durationfield_addition_with_filter(self):
test_set = Experiment.objects.filter(end=F("start") + F("estimated_time"))
self.assertGreater(test_set.count(), 0)
self.assertEqual(
[e.name for e in test_set],
[
e.name
for e in Experiment.objects.all()
if e.end == e.start + e.estimated_time
],
)
def test_datetime_and_duration_field_addition_with_annotate_and_no_output_field(
self,
):
test_set = Experiment.objects.annotate(
estimated_end=F("start") + F("estimated_time")
)
self.assertEqual(
[e.estimated_end for e in test_set],
[e.start + e.estimated_time for e in test_set],
)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_with_annotate_and_no_output_field(self):
test_set = Experiment.objects.annotate(
calculated_duration=F("end") - F("start")
)
self.assertEqual(
[e.calculated_duration for e in test_set],
[e.end - e.start for e in test_set],
)
def test_mixed_comparisons1(self):
for i, delay in enumerate(self.delays):
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gt=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gte=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_mixed_comparisons2(self):
for i, delay in enumerate(self.delays):
delay = datetime.timedelta(delay.days)
test_set = [
e.name
for e in Experiment.objects.filter(start__lt=F("assigned") + delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(
start__lte=F("assigned") + delay + datetime.timedelta(1)
)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_update(self):
for delta in self.deltas:
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F("start") + delta, end=F("end") + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F("start") * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [
e.name
for e in Experiment.objects.filter(start=F("start") + F("estimated_time"))
]
self.assertEqual(zeros, ["e0"])
end_less = [
e.name
for e in Experiment.objects.filter(end__lt=F("start") + F("estimated_time"))
]
self.assertEqual(end_less, ["e2"])
delta_math = [
e.name
for e in Experiment.objects.filter(
end__gte=F("start") + F("estimated_time") + datetime.timedelta(hours=1)
)
]
self.assertEqual(delta_math, ["e4"])
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") + Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
def test_durationfield_multiply_divide(self):
Experiment.objects.update(scalar=2)
tests = [
(Decimal("2"), 2),
(F("scalar"), 2),
(2, 2),
(3.2, 3.2),
]
for expr, scalar in tests:
with self.subTest(expr=expr):
qs = Experiment.objects.annotate(
multiplied=ExpressionWrapper(
expr * F("estimated_time"),
output_field=DurationField(),
),
divided=ExpressionWrapper(
F("estimated_time") / expr,
output_field=DurationField(),
),
)
for experiment in qs:
self.assertEqual(
experiment.multiplied,
experiment.estimated_time * scalar,
)
self.assertEqual(
experiment.divided,
experiment.estimated_time / scalar,
)
def test_duration_expressions(self):
for delta in self.deltas:
qs = Experiment.objects.annotate(duration=F("estimated_time") + delta)
for obj in qs:
self.assertEqual(obj.duration, obj.estimated_time + delta)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=F("completed") - F("assigned"),
)
at_least_5_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=5)
)
}
self.assertEqual(at_least_5_days, {"e3", "e4", "e5"})
at_least_120_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=120)
)
}
self.assertEqual(at_least_120_days, {"e5"})
less_than_5_days = {
e.name
for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))
}
self.assertEqual(less_than_5_days, {"e0", "e1", "e2"})
queryset = Experiment.objects.annotate(
difference=F("completed") - Value(None, output_field=DateField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("completed") - Value(None, output_field=DurationField()),
output_field=DateField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("completed")
queryset = Experiment.objects.annotate(
difference=subquery - F("completed"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_case_subtraction(self):
queryset = Experiment.objects.annotate(
date_case=Case(
When(Q(name="e0"), then=F("completed")),
output_field=DateField(),
),
completed_value=Value(
self.e0.completed,
output_field=DateField(),
),
difference=F("date_case") - F("completed_value"),
).filter(difference=datetime.timedelta())
self.assertEqual(queryset.get(), self.e0)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
queryset = Time.objects.annotate(
difference=F("time") - Value(datetime.time(11, 15, 0)),
)
self.assertEqual(
queryset.get().difference,
datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345),
)
queryset = Time.objects.annotate(
difference=F("time") - Value(None, output_field=TimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Time.objects.annotate(
shifted=ExpressionWrapper(
F("time") - Value(None, output_field=DurationField()),
output_field=TimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subquery_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
subquery = Time.objects.filter(pk=OuterRef("pk")).values("time")
queryset = Time.objects.annotate(
difference=subquery - F("time"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction(self):
under_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__gt=F("end") - F("start"))
]
self.assertEqual(under_estimate, ["e2"])
over_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__lt=F("end") - F("start"))
]
self.assertEqual(over_estimate, ["e4"])
queryset = Experiment.objects.annotate(
difference=F("start") - Value(None, output_field=DateTimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") - Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("start")
queryset = Experiment.objects.annotate(
difference=subquery - F("start"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
Experiment.objects.update(end=F("start") + delta)
qs = Experiment.objects.annotate(delta=F("end") - F("start"))
for e in qs:
self.assertEqual(e.delta, delta)
def test_duration_with_datetime(self):
# Exclude e1, which has very high precision, so we can test this on all
# backends regardless of whether they support microsecond_precision.
over_estimate = (
Experiment.objects.exclude(name="e1")
.filter(
completed__gt=self.stime + F("estimated_time"),
)
.order_by("name")
)
self.assertQuerysetEqual(over_estimate, ["e3", "e4", "e5"], lambda e: e.name)
def test_duration_with_datetime_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
qs = Experiment.objects.annotate(
dt=ExpressionWrapper(
F("start") + delta,
output_field=DateTimeField(),
)
)
for e in qs:
self.assertEqual(e.dt, e.start + delta)
def test_date_minus_duration(self):
more_than_4_days = Experiment.objects.filter(
assigned__lt=F("completed") - Value(datetime.timedelta(days=4))
)
self.assertQuerysetEqual(more_than_4_days, ["e3", "e4", "e5"], lambda e: e.name)
def test_negative_timedelta_update(self):
# Subtract 30 seconds, 30 minutes, 2 hours, and 2 days.
experiments = (
Experiment.objects.filter(name="e0")
.annotate(
start_sub_seconds=F("start") + datetime.timedelta(seconds=-30),
)
.annotate(
start_sub_minutes=F("start_sub_seconds")
+ datetime.timedelta(minutes=-30),
)
.annotate(
start_sub_hours=F("start_sub_minutes") + datetime.timedelta(hours=-2),
)
.annotate(
new_start=F("start_sub_hours") + datetime.timedelta(days=-2),
)
)
expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
# Subtract 30 microseconds.
experiments = experiments.annotate(
new_start=F("new_start") + datetime.timedelta(microseconds=-30)
)
expected_start += datetime.timedelta(microseconds=+746970)
experiments.update(start=F("new_start"))
e0 = Experiment.objects.get(name="e0")
self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(
uuid=Value(
uuid.UUID("12345678901234567890123456789012"), output_field=UUIDField()
)
)
self.assertEqual(
UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012")
)
def test_deconstruct(self):
value = Value("name")
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value("name", output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(
kwargs["output_field"].deconstruct(), CharField().deconstruct()
)
def test_repr(self):
tests = [
(None, "Value(None)"),
("str", "Value('str')"),
(True, "Value(True)"),
(42, "Value(42)"),
(
datetime.datetime(2019, 5, 15),
"Value(datetime.datetime(2019, 5, 15, 0, 0))",
),
(Decimal("3.14"), "Value(Decimal('3.14'))"),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(repr(Value(value)), expected)
def test_equal(self):
value = Value("name")
self.assertEqual(value, Value("name"))
self.assertNotEqual(value, Value("username"))
def test_hash(self):
d = {Value("name"): "Bob"}
self.assertIn(Value("name"), d)
self.assertEqual(d[Value("name")], "Bob")
def test_equal_output_field(self):
value = Value("name", output_field=CharField())
same_value = Value("name", output_field=CharField())
other_value = Value("name", output_field=TimeField())
no_output_field = Value("name")
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_raise_empty_expressionlist(self):
msg = "ExpressionList requires at least one expression"
with self.assertRaisesMessage(ValueError, msg):
ExpressionList()
def test_compile_unresolved(self):
# This test might need to be revisited later on if #25425 is enforced.
compiler = Time.objects.all().query.get_compiler(connection=connection)
value = Value("foo")
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
value = Value("foo", output_field=CharField())
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
def test_output_field_decimalfield(self):
Time.objects.create()
time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first()
self.assertEqual(time.one, 1)
def test_resolve_output_field(self):
value_types = [
("str", CharField),
(True, BooleanField),
(42, IntegerField),
(3.14, FloatField),
(datetime.date(2019, 5, 15), DateField),
(datetime.datetime(2019, 5, 15), DateTimeField),
(datetime.time(3, 16), TimeField),
(datetime.timedelta(1), DurationField),
(Decimal("3.14"), DecimalField),
(b"", BinaryField),
(uuid.uuid4(), UUIDField),
]
for value, output_field_type in value_types:
with self.subTest(type=type(value)):
expr = Value(value)
self.assertIsInstance(expr.output_field, output_field_type)
def test_resolve_output_field_failure(self):
msg = "Cannot resolve expression type, unknown output_field"
with self.assertRaisesMessage(FieldError, msg):
Value(object()).output_field
def test_output_field_does_not_create_broken_validators(self):
"""
The output field for a given Value doesn't get cleaned and validated;
however, validators may still be instantiated for a given field type,
and this test demonstrates that they don't throw an exception.
"""
value_types = [
"str",
True,
42,
3.14,
datetime.date(2019, 5, 15),
datetime.datetime(2019, 5, 15),
datetime.time(3, 16),
datetime.timedelta(1),
Decimal("3.14"),
b"",
uuid.uuid4(),
]
for value in value_types:
with self.subTest(type=type(value)):
field = Value(value)._resolve_output_field()
field.clean(value, model_instance=None)
class ExistsTests(TestCase):
def test_optimizations(self):
with CaptureQueriesContext(connection) as context:
list(
Experiment.objects.values(
exists=Exists(
Experiment.objects.order_by("pk"),
)
).order_by()
)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Experiment._meta.pk.column),
captured_sql,
)
self.assertIn(
connection.ops.limit_offset_sql(None, 1),
captured_sql,
)
self.assertNotIn("ORDER BY", captured_sql)
def test_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.filter(~Exists(Manager.objects.none()) & Q(pk=manager.pk))
self.assertSequenceEqual(qs, [manager])
def test_select_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.annotate(
not_exists=~Exists(Manager.objects.none())
).filter(pk=manager.pk)
self.assertSequenceEqual(qs, [manager])
self.assertIs(qs.get().not_exists, True)
class FieldTransformTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
cls.ex1 = Experiment.objects.create(
name="Experiment 1",
assigned=sday,
completed=sday + datetime.timedelta(2),
estimated_time=datetime.timedelta(2),
start=stime,
end=stime + datetime.timedelta(2),
)
def test_month_aggregation(self):
self.assertEqual(
Experiment.objects.aggregate(month_count=Count("assigned__month")),
{"month_count": 1},
)
def test_transform_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("assigned__month"),
[{"assigned__month": 6}],
)
def test_multiple_transforms_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("end__date__month"),
[{"end__date__month": 6}],
)
class ReprTests(SimpleTestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>",
)
self.assertEqual(
repr(When(Q(age__gte=18), then=Value("legal"))),
"<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value('legal')>",
)
self.assertEqual(repr(Col("alias", "field")), "Col(alias, field)")
self.assertEqual(repr(F("published")), "F(published)")
self.assertEqual(
repr(F("cost") + F("tax")), "<CombinedExpression: F(cost) + F(tax)>"
)
self.assertEqual(
repr(ExpressionWrapper(F("cost") + F("tax"), IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))",
)
self.assertEqual(
repr(Func("published", function="TO_CHAR")),
"Func(F(published), function=TO_CHAR)",
)
self.assertEqual(repr(OrderBy(Value(1))), "OrderBy(Value(1), descending=False)")
self.assertEqual(repr(RawSQL("table.col", [])), "RawSQL(table.col, [])")
self.assertEqual(
repr(Ref("sum_cost", Sum("cost"))), "Ref(sum_cost, Sum(F(cost)))"
)
self.assertEqual(repr(Value(1)), "Value(1)")
self.assertEqual(
repr(ExpressionList(F("col"), F("anothercol"))),
"ExpressionList(F(col), F(anothercol))",
)
self.assertEqual(
repr(ExpressionList(OrderBy(F("col"), descending=False))),
"ExpressionList(OrderBy(F(col), descending=False))",
)
def test_functions(self):
self.assertEqual(repr(Coalesce("a", "b")), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat("a", "b")), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length("a")), "Length(F(a))")
self.assertEqual(repr(Lower("a")), "Lower(F(a))")
self.assertEqual(repr(Substr("a", 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper("a")), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg("a")), "Avg(F(a))")
self.assertEqual(repr(Count("a")), "Count(F(a))")
self.assertEqual(repr(Count("*")), "Count('*')")
self.assertEqual(repr(Max("a")), "Max(F(a))")
self.assertEqual(repr(Min("a")), "Min(F(a))")
self.assertEqual(repr(StdDev("a")), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum("a")), "Sum(F(a))")
self.assertEqual(
repr(Variance("a", sample=True)), "Variance(F(a), sample=True)"
)
def test_distinct_aggregates(self):
self.assertEqual(repr(Count("a", distinct=True)), "Count(F(a), distinct=True)")
self.assertEqual(repr(Count("*", distinct=True)), "Count('*', distinct=True)")
def test_filtered_aggregates(self):
filter = Q(a=1)
self.assertEqual(
repr(Avg("a", filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Count("a", filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Max("a", filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Min("a", filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(StdDev("a", filter=filter)),
"StdDev(F(a), filter=(AND: ('a', 1)), sample=False)",
)
self.assertEqual(
repr(Sum("a", filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Variance("a", sample=True, filter=filter)),
"Variance(F(a), filter=(AND: ('a', 1)), sample=True)",
)
self.assertEqual(
repr(Count("a", filter=filter, distinct=True)),
"Count(F(a), distinct=True, filter=(AND: ('a', 1)))",
)
class CombinableTests(SimpleTestCase):
bitwise_msg = (
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def test_negation(self):
c = Combinable()
self.assertEqual(-c, c * -1)
def test_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() & Combinable()
def test_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() | Combinable()
def test_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() ^ Combinable()
def test_reversed_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() & Combinable()
def test_reversed_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() | Combinable()
def test_reversed_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() ^ Combinable()
class CombinedExpressionTests(SimpleTestCase):
def test_resolve_output_field_number(self):
tests = [
(IntegerField, AutoField, IntegerField),
(AutoField, IntegerField, IntegerField),
(IntegerField, DecimalField, DecimalField),
(DecimalField, IntegerField, DecimalField),
(IntegerField, FloatField, FloatField),
(FloatField, IntegerField, FloatField),
]
connectors = [
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
]
for lhs, rhs, combined in tests:
for connector in connectors:
with self.subTest(
lhs=lhs, connector=connector, rhs=rhs, combined=combined
):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
self.assertIsInstance(expr.output_field, combined)
def test_resolve_output_field_with_null(self):
def null():
return Value(None)
tests = [
# Numbers.
(AutoField, Combinable.ADD, null),
(DecimalField, Combinable.ADD, null),
(FloatField, Combinable.ADD, null),
(IntegerField, Combinable.ADD, null),
(IntegerField, Combinable.SUB, null),
(null, Combinable.ADD, IntegerField),
# Dates.
(DateField, Combinable.ADD, null),
(DateTimeField, Combinable.ADD, null),
(DurationField, Combinable.ADD, null),
(TimeField, Combinable.ADD, null),
(TimeField, Combinable.SUB, null),
(null, Combinable.ADD, DateTimeField),
(DateField, Combinable.SUB, null),
]
for lhs, connector, rhs in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
with self.assertRaisesMessage(FieldError, msg):
expr.output_field
def test_resolve_output_field_dates(self):
tests = [
# Add - same type.
(DateField, Combinable.ADD, DateField, FieldError),
(DateTimeField, Combinable.ADD, DateTimeField, FieldError),
(TimeField, Combinable.ADD, TimeField, FieldError),
(DurationField, Combinable.ADD, DurationField, DurationField),
# Add - different type.
(DateField, Combinable.ADD, DurationField, DateTimeField),
(DateTimeField, Combinable.ADD, DurationField, DateTimeField),
(TimeField, Combinable.ADD, DurationField, TimeField),
(DurationField, Combinable.ADD, DateField, DateTimeField),
(DurationField, Combinable.ADD, DateTimeField, DateTimeField),
(DurationField, Combinable.ADD, TimeField, TimeField),
# Subtract - same type.
(DateField, Combinable.SUB, DateField, DurationField),
(DateTimeField, Combinable.SUB, DateTimeField, DurationField),
(TimeField, Combinable.SUB, TimeField, DurationField),
(DurationField, Combinable.SUB, DurationField, DurationField),
# Subtract - different type.
(DateField, Combinable.SUB, DurationField, DateTimeField),
(DateTimeField, Combinable.SUB, DurationField, DateTimeField),
(TimeField, Combinable.SUB, DurationField, TimeField),
(DurationField, Combinable.SUB, DateField, FieldError),
(DurationField, Combinable.SUB, DateTimeField, FieldError),
(DurationField, Combinable.SUB, TimeField, FieldError),
]
for lhs, connector, rhs, combined in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
if issubclass(combined, Exception):
with self.assertRaisesMessage(combined, msg):
expr.output_field
else:
self.assertIsInstance(expr.output_field, combined)
def test_mixed_char_date_with_annotate(self):
queryset = Experiment.objects.annotate(nonsense=F("name") + F("assigned"))
msg = (
"Cannot infer type of '+' expression involving these types: CharField, "
"DateField. You must set output_field."
)
with self.assertRaisesMessage(FieldError, msg):
list(queryset)
class ExpressionWrapperTests(SimpleTestCase):
def test_empty_group_by(self):
expr = ExpressionWrapper(Value(3), output_field=IntegerField())
self.assertEqual(expr.get_group_by_cols(), [])
def test_non_empty_group_by(self):
value = Value("f")
value.output_field = None
expr = ExpressionWrapper(Lower(value), output_field=IntegerField())
group_by_cols = expr.get_group_by_cols()
self.assertEqual(group_by_cols, [expr.expression])
self.assertEqual(group_by_cols[0].output_field, expr.output_field)
class OrderByTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field"), nulls_last=True),
)
self.assertNotEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field")),
)
def test_hash(self):
self.assertEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"), nulls_last=True)),
)
self.assertNotEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"))),
)
def test_nulls_false(self):
# These tests will catch ValueError in Django 5.0, when passing False to
# nulls_first and nulls_last becomes forbidden.
# msg = "nulls_first and nulls_last values must be True or None."
msg = (
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_last=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").asc(nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").desc(nulls_last=False)
|
6ecf8e89abc87f3534f8fc10af78deb005dccac149ca04ce17f0df9cd7735a50 | from datetime import datetime, timedelta
from django.db import connection
from django.db.models import TextField
from django.db.models.functions import Cast, Now
from django.test import TestCase
from django.utils import timezone
from ..models import Article
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class NowTests(TestCase):
def test_basic(self):
a1 = Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
a2 = Article.objects.create(
title="How to Time Travel",
text=lorem_ipsum,
written=timezone.now(),
)
num_updated = Article.objects.filter(id=a1.id, published=None).update(
published=Now()
)
self.assertEqual(num_updated, 1)
num_updated = Article.objects.filter(id=a1.id, published=None).update(
published=Now()
)
self.assertEqual(num_updated, 0)
a1.refresh_from_db()
self.assertIsInstance(a1.published, datetime)
a2.published = Now() + timedelta(days=2)
a2.save()
a2.refresh_from_db()
self.assertIsInstance(a2.published, datetime)
self.assertQuerysetEqual(
Article.objects.filter(published__lte=Now()),
["How to Django"],
lambda a: a.title,
)
self.assertQuerysetEqual(
Article.objects.filter(published__gt=Now()),
["How to Time Travel"],
lambda a: a.title,
)
def test_microseconds(self):
Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
now_string = (
Article.objects.annotate(now_string=Cast(Now(), TextField()))
.get()
.now_string
)
precision = connection.features.time_cast_precision
self.assertRegex(now_string, rf"^.*\.\d{{1,{precision}}}")
|
be8281845c02e13309b740659eef52b0663b74d660ae34b1bf1f85c72d041785 | import copy
import datetime
import functools
import inspect
import warnings
from collections import defaultdict
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
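# Cast sketch (illustrative table/column names): on SQLite an expression with
# output_field=DecimalField() has its compiled SQL wrapped by this mixin,
# e.g. SUM("shop_item"."price") becomes CAST(SUM("shop_item"."price") AS
# NUMERIC), so comparisons against Decimal values filter correctly.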
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(); the '&' and '|' are reserved for boolean operator
# usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
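# Combination sketch: F("price") + 2 calls __add__(), which wraps 2 in
# Value(2) and returns CombinedExpression(F(price), '+', Value(2)); the
# reflected form 2 + F("price") goes through __radd__() with reversed=True,
# yielding CombinedExpression(Value(2), '+', F(price)).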
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
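# Usage sketch (Flag is an illustrative model): bitwise work goes through the
# named helpers, which build the same CombinedExpression tree as the
# arithmetic operators, while '&', '|', and '^' stay reserved for combining
# conditional expressions into Q objects:
#
# Flag.objects.update(mask=F("mask").bitor(0b0100))  # SQL: "mask" | 4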
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
# Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered SQL parameter placeholders to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression.
As a guess, if the output fields of all source fields match then simply
infer the same type here.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
# This guess is mostly a bad idea, but there is quite a lot of code
# (especially 3rd party Func subclasses) that depends on it; we'd need a
# deprecation path to fix it.
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field, which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
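# Conversion sketch: with output_field=FloatField(), a backend that returns
# Decimal('2.50') for an annotation has the value coerced to the Python
# float 2.5 by the lambda above; None always passes through unchanged.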
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.copy()
source_expressions = clone.get_source_expressions()
clone.set_source_expressions(
[
expr.replace_expressions(replacements) if expr else None
for expr in source_expressions
]
)
return clone
def copy(self):
return copy.copy(self)
def prefix_references(self, prefix):
clone = self.copy()
clone.set_source_expressions(
[
F(f"{prefix}{expr.name}")
if isinstance(expr, F)
else expr.prefix_references(prefix)
for expr in self.get_source_expressions()
]
)
return clone
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
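# Equality sketch: identity is derived from the constructor arguments, so
# structurally identical expressions compare and hash equal, e.g.
# Value("name") == Value("name") and hash(Value("name")) is stable, which is
# what lets expressions serve as dict keys (see ValueTests.test_hash above).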
# Type inference for CombinedExpression.output_field.
# Missing items will result in FieldError, by design.
#
# The current approach for NULL is based on lowest-common-denominator
# behavior, i.e. if one of the supported databases raises an error (rather
# than returning NULL) for `val <op> NULL`, then Django raises FieldError.
NoneType = type(None)
_connector_combinations = [
# Numeric operations - operands of same type.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.FloatField, fields.FloatField, fields.FloatField),
(fields.DecimalField, fields.DecimalField, fields.DecimalField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
# Behavior for DIV with integer arguments follows Postgres/SQLite,
# not MySQL/Oracle.
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
},
# Numeric operations - operands of different type.
{
connector: [
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
)
},
# Bitwise operators.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
]
for connector in (
Combinable.BITAND,
Combinable.BITOR,
Combinable.BITLEFTSHIFT,
Combinable.BITRIGHTSHIFT,
Combinable.BITXOR,
)
},
# Numeric with NULL.
{
connector: [
(field_type, NoneType, field_type),
(NoneType, field_type, field_type),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField)
},
# Date/DateTimeField/DurationField/TimeField.
{
Combinable.ADD: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DurationField, fields.DateField, fields.DateTimeField),
(fields.DurationField, fields.DateTimeField, fields.DateTimeField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.DurationField, fields.TimeField, fields.TimeField),
],
},
{
Combinable.SUB: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DateField, fields.DateField, fields.DurationField),
(fields.DateField, fields.DateTimeField, fields.DurationField),
(fields.DateTimeField, fields.DateField, fields.DurationField),
(fields.DateTimeField, fields.DateTimeField, fields.DurationField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.TimeField, fields.TimeField, fields.DurationField),
],
},
]
_connector_combinators = defaultdict(list)
def register_combinable_fields(lhs, connector, rhs, result):
"""
Register combinable types:
lhs <connector> rhs -> result
e.g.
register_combinable_fields(
IntegerField, Combinable.ADD, FloatField, FloatField
)
"""
_connector_combinators[connector].append((lhs, rhs, result))
for d in _connector_combinations:
for connector, field_types in d.items():
for lhs, rhs, result in field_types:
register_combinable_fields(lhs, connector, rhs, result)
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
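# Resolution sketch: _resolve_combined_type(Combinable.ADD,
# fields.IntegerField, fields.FloatField) walks the registered combinators
# and returns fields.FloatField; an unregistered pair falls through and
# returns None, which CombinedExpression._resolve_output_field() turns into
# a FieldError.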
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
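# Guard sketch: this check is what makes
# Experiment.objects.filter(start=F("start") * datetime.timedelta(0)) raise
# DatabaseError on SQLite (see test_invalid_operator above): a DateTimeField
# operand is not in allowed_fields for '*' or '/'.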
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
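# Dispatch sketch: F("end") - F("start") over two DateTimeFields is rewritten
# by CombinedExpression.resolve_expression() into
# TemporalSubtraction(F("end"), F("start")), so its output_field is always
# DurationField, matching the SUB rows in _connector_combinations above.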
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_expressions(self, replacements):
return replacements.get(self, self)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
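# Usage sketch (Post/Comment are illustrative models): OuterRef defers
# resolution until its queryset is embedded as a subquery, at which point it
# becomes a ResolvedOuterRef pointing at the outer query's column:
#
# newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
# Post.objects.annotate(newest_body=Subquery(newest.values("body")[:1]))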
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
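# Subclass sketch (Reverse is illustrative; REVERSE exists on several
# backends): a minimal Func subclass only sets `function`, and the default
# template renders FUNCTION(arg1, arg2, ...):
#
# class Reverse(Func):
#     function = "REVERSE"
#     arity = 1
#
# Author.objects.annotate(backwards=Reverse(F("name")))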
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
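# Inference sketch: Value("abc").output_field resolves to CharField() via
# _resolve_output_field() above, so common Python types need no explicit
# output_field; Value(object()).output_field raises FieldError because no
# branch matches (see ValueTests.test_resolve_output_field above).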
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parent fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
Reference to a column alias of the query. For example, Ref('sum_cost') in
a qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
def get_group_by_cols(self):
group_by_cols = []
for order_by in self.get_source_expressions():
group_by_cols.extend(order_by.get_group_by_cols())
return group_by_cols
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
        # For non-expressions, e.g. an SQL WHERE clause, the entire
        # `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
# Filters that match everything are handled as empty strings in the
# WHERE clause, but in a CASE WHEN expression they must use a predicate
# that's always True.
if condition_sql == "":
if connection.features.supports_boolean_expr_in_select_clause:
condition_sql, condition_params = compiler.compile(Value(True))
else:
condition_sql, condition_params = "1=1", ()
template_params["condition"] = condition_sql
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
template = template or self.template
return template % template_params, (
*sql_params,
*condition_params,
*result_params,
)
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
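# Condition forms accepted by When(), per __init__ above (hypothetical
# models and fields):
#   When(balance__gt=0, then=Value("positive"))                 # lookups
#   When(Q(balance__gt=0, vip=True), then=Value("vip"))         # a Q object
#   When(Exists(Comment.objects.filter(post=OuterRef("pk"))),
#        then=Value("commented"))                               # boolean expression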
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
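# Usage sketch mirroring the docstring above (hypothetical Account model):
#   Account.objects.annotate(
#       sign=Case(
#           When(balance__gt=0, then=Value("positive")),
#           When(balance__lt=0, then=Value("negative")),
#           default=Value("zero"),
#       )
#   )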
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
        # If this expression is referenced by an alias for an explicit GROUP BY
        # through values(), a reference to this expression and not the
        # underlying .query must be returned to ensure external column
        # references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
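# Usage sketch (hypothetical Post/Comment models): annotate each post with
# the email from its newest comment via a correlated subquery:
#   newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created_at")
#   Post.objects.annotate(newest_email=Subquery(newest.values("email")[:1]))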
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def get_group_by_cols(self, alias=None):
# self.query only gets limited to a single row in the .exists() call
# from self.as_sql() so deferring to Query.get_group_by_cols() is
# inappropriate.
if alias is None:
return [self]
return super().get_group_by_cols(alias)
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
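# Usage sketch (hypothetical models):
#   Post.objects.filter(Exists(Comment.objects.filter(post=OuterRef("pk"))))
# Negation goes through __invert__ above:
#   Post.objects.filter(~Exists(...))  # compiles to NOT EXISTS(...)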
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
if nulls_first is False or nulls_last is False:
# When the deprecation ends, replace with:
# raise ValueError(
# "nulls_first and nulls_last values must be True or None."
# )
warnings.warn(
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first:
self.nulls_last = True
self.nulls_first = None
elif self.nulls_last:
self.nulls_first = True
self.nulls_last = None
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
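# Usage sketch: OrderBy is usually created via F() expressions rather than
# instantiated directly (hypothetical Article model):
#   Article.objects.order_by(F("pub_date").desc(nulls_last=True))
# where F("pub_date").desc(nulls_last=True) returns
# OrderBy(F("pub_date"), descending=True, nulls_last=True).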
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
    # Although the main expression may be either an aggregate or an
    # expression containing an aggregate function, the GROUP BY that would
    # otherwise be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], ()
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params += tuple(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params += tuple(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params += tuple(frame_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
(*params, *window_params),
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
group_by_cols = []
if self.partition_by:
group_by_cols.extend(self.partition_by.get_group_by_cols())
if self.order_by is not None:
group_by_cols.extend(self.order_by.get_group_by_cols())
return group_by_cols
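# Usage sketch (hypothetical Employee model):
#   Employee.objects.annotate(
#       dept_avg=Window(
#           expression=Avg("salary"),
#           partition_by=F("department"),
#           order_by="hire_date",  # a plain string is wrapped in OrderByList above
#       )
#   )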
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of frame
    clauses, implemented as subclasses; however, all processing and validation
    (by no means intended to be complete) is done here. Thus, providing an end
    for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the
    last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
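# Frame usage sketch (hypothetical Sale model): a moving sum over the three
# preceding rows plus the current row. Per WindowFrame.__str__ above, a
# negative bound means N PRECEDING and 0 means CURRENT ROW:
#   Sale.objects.annotate(
#       running=Window(
#           expression=Sum("amount"),
#           order_by=F("day").asc(),
#           frame=RowRange(start=-3, end=0),
#       )
#   )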
import datetime
import importlib
import io
import os
import shutil
import sys
from unittest import mock
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import (
ConnectionHandler,
DatabaseError,
OperationalError,
connection,
connections,
models,
)
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.utils import truncate_name
from django.db.migrations.exceptions import InconsistentMigrationHistory
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import captured_stdout
from django.utils import timezone
from django.utils.version import get_docs_version
from .models import UnicodeModel, UnserializableModel
from .routers import TestRouter
from .test_base import MigrationTestBase
HAS_BLACK = shutil.which("black")
class MigrateTests(MigrationTestBase):
"""
Tests running the migrate command.
"""
databases = {"default", "other"}
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# No tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
stdout = io.StringIO()
call_command(
"migrate", "migrations", "0001", verbosity=2, stdout=stdout, no_color=True
)
stdout = stdout.getvalue()
self.assertIn(
"Target specific migration: 0001_initial, from migrations", stdout
)
self.assertIn("Applying migrations.0001_initial... OK", stdout)
self.assertIn("Running pre-migrate handlers for application migrations", stdout)
self.assertIn(
"Running post-migrate handlers for application migrations", stdout
)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
stdout = io.StringIO()
call_command(
"migrate", "migrations", "zero", verbosity=2, stdout=stdout, no_color=True
)
stdout = stdout.getvalue()
self.assertIn("Unapply all migrations: migrations", stdout)
self.assertIn("Unapplying migrations.0002_second... OK", stdout)
self.assertIn("Running pre-migrate handlers for application migrations", stdout)
self.assertIn(
"Running post-migrate handlers for application migrations", stdout
)
# Tables are gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"migrations.migrations_test_apps.migrated_app",
]
)
def test_migrate_with_system_checks(self):
out = io.StringIO()
call_command("migrate", skip_checks=False, no_color=True, stdout=out)
self.assertIn("Apply all migrations: migrated_app", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unmigrated_app_syncdb",
]
)
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_syncdb' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", app_label="unmigrated_app_syncdb")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_clashing_prefix"}
)
def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", app_label="migrations", migration_name="a")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command(
"migrate", app_label="migrations", migration_name="nonexistent"
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"}
)
def test_migrate_initial_false(self):
"""
`Migration.initial = False` skips fake-initial detection.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Fake rollback
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
# Make sure fake-initial detection does not run
with self.assertRaises(DatabaseError):
call_command(
"migrate", "migrations", "0001", fake_initial=True, verbosity=0
)
call_command("migrate", "migrations", "0001", fake=True, verbosity=0)
# Real rollback
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations"},
DATABASE_ROUTERS=["migrations.routers.TestRouter"],
)
def test_migrate_fake_initial(self):
"""
        --fake-initial only works if all tables created in the initial
        migration of an app exist. Database routers must be obeyed when doing
that check.
"""
# Make sure no tables are created
for db in self.databases:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "0001", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Also check the "other" database
self.assertTableNotExists("migrations_author", using="other")
self.assertTableExists("migrations_tribble", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command(
"migrate", "migrations", "zero", fake=True, verbosity=0, database="other"
)
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble", using="other")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", verbosity=0)
# Run initial migration with an explicit --fake-initial
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
stdout=out,
verbosity=1,
)
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
verbosity=0,
database="other",
)
self.assertIn("migrations.0001_initial... faked", out.getvalue().lower())
try:
# Run migrations all the way.
call_command("migrate", verbosity=0)
call_command("migrate", verbosity=0, database="other")
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
self.assertTableNotExists("migrations_author", using="other")
self.assertTableNotExists("migrations_tribble", using="other")
self.assertTableNotExists("migrations_book", using="other")
# Fake a roll-back.
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command(
"migrate",
"migrations",
"zero",
fake=True,
verbosity=0,
database="other",
)
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Run initial migration.
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", verbosity=0)
# Run initial migration with an explicit --fake-initial.
with self.assertRaises(DatabaseError):
# Fails because "migrations_tribble" does not exist but needs
# to in order to make --fake-initial work.
call_command("migrate", "migrations", fake_initial=True, verbosity=0)
# Fake an apply.
call_command("migrate", "migrations", fake=True, verbosity=0)
call_command(
"migrate", "migrations", fake=True, verbosity=0, database="other"
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0, database="other")
# Make sure it's all gone
for db in self.databases:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
self.assertTableNotExists("migrations_book", using=db)
@skipUnlessDBFeature("ignores_table_name_case")
def test_migrate_fake_initial_case_insensitive(self):
with override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_fake_initial_case_insensitive.initial",
}
):
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
with override_settings(
MIGRATION_MODULES={
"migrations": (
"migrations.test_fake_initial_case_insensitive.fake_initial"
),
}
):
out = io.StringIO()
call_command(
"migrate",
"migrations",
"0001",
fake_initial=True,
stdout=out,
verbosity=1,
no_color=True,
)
self.assertIn(
"migrations.0001_initial... faked",
out.getvalue().lower(),
)
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_fake_split_initial"
}
)
def test_migrate_fake_split_initial(self):
"""
Split initial migrations can be faked with --fake-initial.
"""
try:
call_command("migrate", "migrations", "0002", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"migrate",
"migrations",
"0002",
fake_initial=True,
stdout=out,
verbosity=1,
)
value = out.getvalue().lower()
self.assertIn("migrations.0001_initial... faked", value)
self.assertIn("migrations.0002_second... faked", value)
finally:
# Fake an apply.
call_command("migrate", "migrations", fake=True, verbosity=0)
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"}
)
def test_migrate_conflict_exit(self):
"""
migrate exits if it detects a conflict.
"""
msg = (
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (0002_conflicting_second, 0002_second in "
"migrations).\n"
"To fix them run 'python manage.py makemigrations --merge'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", "migrations")
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
}
)
def test_migrate_check(self):
with self.assertRaises(SystemExit):
call_command("migrate", "migrations", "0001", check_unapplied=True)
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
]
)
def test_migrate_check_migrated_app(self):
out = io.StringIO()
try:
call_command("migrate", "migrated_app", verbosity=0)
call_command(
"migrate",
"migrated_app",
stdout=out,
check_unapplied=True,
)
self.assertEqual(out.getvalue(), "")
finally:
# Unmigrate everything.
call_command("migrate", "migrated_app", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_plan",
}
)
def test_migrate_check_plan(self):
out = io.StringIO()
with self.assertRaises(SystemExit):
call_command(
"migrate",
"migrations",
"0001",
check_unapplied=True,
plan=True,
stdout=out,
no_color=True,
)
self.assertEqual(
"Planned operations:\n"
"migrations.0001_initial\n"
" Create model Salamander\n"
" Raw Python operation -> Grow salamander tail.\n",
out.getvalue(),
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_showmigrations_list(self):
"""
showmigrations --list displays migrations and whether or not they're
applied.
"""
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: True
):
call_command(
"showmigrations", format="list", stdout=out, verbosity=0, no_color=False
)
self.assertEqual(
"\x1b[1mmigrations\n\x1b[0m [ ] 0001_initial\n [ ] 0002_second\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = io.StringIO()
        # Giving an explicit app_label tests selective `show_list` in the command.
call_command(
"showmigrations",
"migrations",
format="list",
stdout=out,
verbosity=0,
no_color=True,
)
self.assertEqual(
"migrations\n [x] 0001_initial\n [ ] 0002_second\n", out.getvalue().lower()
)
out = io.StringIO()
# Applied datetimes are displayed at verbosity 2+.
call_command(
"showmigrations", "migrations", stdout=out, verbosity=2, no_color=True
)
migration1 = MigrationRecorder(connection).migration_qs.get(
app="migrations", name="0001_initial"
)
self.assertEqual(
"migrations\n"
" [x] 0001_initial (applied at %s)\n"
" [ ] 0002_second\n" % migration1.applied.strftime("%Y-%m-%d %H:%M:%S"),
out.getvalue().lower(),
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_showmigrations_list_squashed(self):
out = io.StringIO()
call_command(
"showmigrations", format="list", stdout=out, verbosity=2, no_color=True
)
self.assertEqual(
"migrations\n [ ] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command(
"migrate",
"migrations",
"0001_squashed_0002",
stdout=out,
verbosity=2,
no_color=True,
)
try:
self.assertIn(
"operations to perform:\n"
" target specific migration: 0001_squashed_0002, from migrations\n"
"running pre-migrate handlers for application migrations\n"
"running migrations:\n"
" applying migrations.0001_squashed_0002... ok (",
out.getvalue().lower(),
)
out = io.StringIO()
call_command(
"showmigrations", format="list", stdout=out, verbosity=2, no_color=True
)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}
)
def test_showmigrations_plan(self):
"""
        Tests the --plan output of the showmigrations command.
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, "
"migrations.0003_third)\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, "
"migrations.0003_third)\n",
out.getvalue().lower(),
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_plan"}
)
def test_migrate_plan(self):
"""Tests migrate --plan output."""
out = io.StringIO()
# Show the plan up to the third migration.
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0001_initial\n"
" Create model Salamander\n"
" Raw Python operation -> Grow salamander tail.\n"
"migrations.0002_second\n"
" Create model Book\n"
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
"migrations.0003_third\n"
" Create model Author\n"
" Raw SQL operation -> ['SELECT * FROM migrations_author']\n",
out.getvalue(),
)
try:
# Migrate to the third migration.
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
# Show the plan for when there is nothing to apply.
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n No planned migration operations.\n",
out.getvalue(),
)
out = io.StringIO()
# Show the plan for reverse migration back to 0001.
call_command(
"migrate", "migrations", "0001", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0003_third\n"
" Undo Create model Author\n"
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
"migrations.0002_second\n"
" Undo Create model Book\n"
" Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n",
out.getvalue(),
)
out = io.StringIO()
# Show the migration plan to fourth, with truncated details.
call_command(
"migrate", "migrations", "0004", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0004_fourth\n"
" Raw SQL operation -> SELECT * FROM migrations_author WHE…\n",
out.getvalue(),
)
# Show the plan when an operation is irreversible.
# Migrate to the fourth migration.
call_command("migrate", "migrations", "0004", verbosity=0)
out = io.StringIO()
call_command(
"migrate", "migrations", "0003", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0004_fourth\n"
" Raw SQL operation -> IRREVERSIBLE\n",
out.getvalue(),
)
out = io.StringIO()
call_command(
"migrate", "migrations", "0005", plan=True, stdout=out, no_color=True
)
# Operation is marked as irreversible only in the revert plan.
self.assertEqual(
"Planned operations:\n"
"migrations.0005_fifth\n"
" Raw Python operation\n"
" Raw Python operation\n"
" Raw Python operation -> Feed salamander.\n",
out.getvalue(),
)
call_command("migrate", "migrations", "0005", verbosity=0)
out = io.StringIO()
call_command(
"migrate", "migrations", "0004", plan=True, stdout=out, no_color=True
)
self.assertEqual(
"Planned operations:\n"
"migrations.0005_fifth\n"
" Raw Python operation -> IRREVERSIBLE\n"
" Raw Python operation -> IRREVERSIBLE\n"
" Raw Python operation\n",
out.getvalue(),
)
finally:
# Cleanup by unmigrating everything: fake the irreversible, then
# migrate all to zero.
call_command("migrate", "migrations", "0003", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}
)
def test_showmigrations_no_migrations(self):
out = io.StringIO()
call_command("showmigrations", stdout=out, no_color=True)
self.assertEqual("migrations\n (no migrations)\n", out.getvalue().lower())
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app"]
)
def test_showmigrations_unmigrated_app(self):
out = io.StringIO()
call_command("showmigrations", "unmigrated_app", stdout=out, no_color=True)
try:
self.assertEqual(
"unmigrated_app\n (no migrations)\n", out.getvalue().lower()
)
finally:
# unmigrated_app.SillyModel has a foreign key to
# 'migrations.Tribble', but that model is only defined in a
# migration, so the global app registry never sees it and the
# reference is left dangling. Remove it to avoid problems in
# subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}
)
def test_showmigrations_plan_no_migrations(self):
"""
        Tests the --plan output of the showmigrations command when there are
        no migrations.
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, no_color=True)
self.assertEqual("(no migrations)\n", out.getvalue().lower())
out = io.StringIO()
call_command(
"showmigrations", format="plan", stdout=out, verbosity=2, no_color=True
)
self.assertEqual("(no migrations)\n", out.getvalue().lower())
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}
)
def test_showmigrations_plan_squashed(self):
"""
        Tests the --plan output of the showmigrations command with squashed
        migrations.
"""
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto\n"
"[ ] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto ... (migrations.1_auto)\n"
"[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower(),
)
call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto\n"
"[x] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("showmigrations", format="plan", stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto ... (migrations.1_auto)\n"
"[x] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower(),
)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.mutate_state_b",
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_showmigrations_plan_single_app_label(self):
"""
`showmigrations --plan app_label` output with a single app_label.
"""
# Single app with no dependencies on other apps.
out = io.StringIO()
call_command("showmigrations", "mutate_state_b", format="plan", stdout=out)
self.assertEqual(
"[ ] mutate_state_b.0001_initial\n[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
# Single app with dependencies.
out = io.StringIO()
call_command("showmigrations", "author_app", format="plan", stdout=out)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n",
out.getvalue(),
)
# Some migrations already applied.
call_command("migrate", "author_app", "0001", verbosity=0)
out = io.StringIO()
call_command("showmigrations", "author_app", format="plan", stdout=out)
self.assertEqual(
"[X] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n",
out.getvalue(),
)
# Cleanup by unmigrating author_app.
call_command("migrate", "author_app", "zero", verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.mutate_state_b",
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_showmigrations_plan_multiple_app_labels(self):
"""
`showmigrations --plan app_label` output with multiple app_labels.
"""
# Multiple apps: author_app depends on book_app; mutate_state_b doesn't
# depend on other apps.
out = io.StringIO()
call_command(
"showmigrations", "mutate_state_b", "author_app", format="plan", stdout=out
)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n"
"[ ] mutate_state_b.0001_initial\n"
"[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
# Multiple apps: args order shouldn't matter (the same result is
# expected as above).
out = io.StringIO()
call_command(
"showmigrations", "author_app", "mutate_state_b", format="plan", stdout=out
)
self.assertEqual(
"[ ] author_app.0001_initial\n"
"[ ] book_app.0001_initial\n"
"[ ] author_app.0002_alter_id\n"
"[ ] mutate_state_b.0001_initial\n"
"[ ] mutate_state_b.0002_add_field\n",
out.getvalue(),
)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app"]
)
def test_showmigrations_plan_app_label_no_migrations(self):
out = io.StringIO()
call_command(
"showmigrations", "unmigrated_app", format="plan", stdout=out, no_color=True
)
try:
self.assertEqual("(no migrations)\n", out.getvalue())
finally:
# unmigrated_app.SillyModel has a foreign key to
# 'migrations.Tribble', but that model is only defined in a
# migration, so the global app registry never sees it and the
# reference is left dangling. Remove it to avoid problems in
# subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_forwards(self):
"""
        sqlmigrate outputs forward-looking SQL.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
self.assertEqual(lines[0], connection.ops.start_transaction_sql())
self.assertEqual(lines[-1], connection.ops.end_transaction_sql())
lines = lines[1:-1]
self.assertEqual(
lines[:3],
[
"--",
"-- Create model Author",
"--",
],
)
self.assertIn(
"create table %s" % connection.ops.quote_name("migrations_author").lower(),
lines[3].lower(),
)
pos = lines.index("--", 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Tribble",
"--",
],
)
self.assertIn(
"create table %s" % connection.ops.quote_name("migrations_tribble").lower(),
lines[pos + 3].lower(),
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Add field bool to tribble",
"--",
],
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Alter unique_together for author (1 constraint(s))",
"--",
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_backwards(self):
"""
        sqlmigrate outputs reverse-looking SQL.
"""
# Cannot generate the reverse SQL unless we've applied the migration.
call_command("migrate", "migrations", verbosity=0)
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
lines = out.getvalue().splitlines()
try:
if connection.features.can_rollback_ddl:
self.assertEqual(lines[0], connection.ops.start_transaction_sql())
self.assertEqual(lines[-1], connection.ops.end_transaction_sql())
lines = lines[1:-1]
self.assertEqual(
lines[:3],
[
"--",
"-- Alter unique_together for author (1 constraint(s))",
"--",
],
)
pos = lines.index("--", 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Add field bool to tribble",
"--",
],
)
pos = lines.index("--", pos + 3)
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Tribble",
"--",
],
)
next_pos = lines.index("--", pos + 3)
drop_table_sql = (
"drop table %s"
% connection.ops.quote_name("migrations_tribble").lower()
)
for line in lines[pos + 3 : next_pos]:
if drop_table_sql in line.lower():
break
else:
self.fail("DROP TABLE (tribble) not found.")
pos = next_pos
self.assertEqual(
lines[pos : pos + 3],
[
"--",
"-- Create model Author",
"--",
],
)
drop_table_sql = (
"drop table %s" % connection.ops.quote_name("migrations_author").lower()
)
for line in lines[pos + 3 :]:
if drop_table_sql in line.lower():
break
else:
self.fail("DROP TABLE (author) not found.")
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}
)
def test_sqlmigrate_for_non_atomic_migration(self):
"""
Transaction wrappers aren't shown for non-atomic migrations.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
if connection.ops.start_transaction_sql():
self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_for_non_transactional_databases(self):
"""
Transaction wrappers aren't shown for databases that don't support
transactional DDL.
"""
out = io.StringIO()
with mock.patch.object(connection.features, "can_rollback_ddl", False):
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
start_transaction_sql = connection.ops.start_transaction_sql()
if start_transaction_sql:
self.assertNotIn(start_transaction_sql.lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_ambiguous_prefix_squashed_migrations(self):
msg = (
"More than one migration matches '0001' in app 'migrations'. "
"Please be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("sqlmigrate", "migrations", "0001")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_squashed_migration(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_squashed_0002", stdout=out)
output = out.getvalue().lower()
self.assertIn("-- create model author", output)
self.assertIn("-- create model book", output)
self.assertNotIn("-- create model tribble", output)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_sqlmigrate_replaced_migration(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_initial", stdout=out)
output = out.getvalue().lower()
self.assertIn("-- create model author", output)
self.assertIn("-- create model tribble", output)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_operations"}
)
def test_sqlmigrate_no_operations(self):
err = io.StringIO()
call_command("sqlmigrate", "migrations", "0001_initial", stderr=err)
self.assertEqual(err.getvalue(), "No operations found.\n")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_noop"}
)
def test_sqlmigrate_noop(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
lines = lines[1:-1]
self.assertEqual(
lines,
[
"--",
"-- Raw SQL operation",
"--",
"-- (no-op)",
],
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_manual_porting"}
)
def test_sqlmigrate_unrepresentable(self):
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0002", stdout=out)
lines = out.getvalue().splitlines()
if connection.features.can_rollback_ddl:
lines = lines[1:-1]
self.assertEqual(
lines,
[
"--",
"-- Raw Python operation",
"--",
"-- THIS OPERATION CANNOT BE WRITTEN AS SQL",
],
)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.unmigrated_app",
],
)
def test_regression_22823_unmigrated_fk_to_migrated_model(self):
"""
Assuming you have 3 apps, `A`, `B`, and `C`, such that:
* `A` has migrations
* `B` has a migration we want to apply
* `C` has no migrations, but has an FK to `A`
        When we try to migrate "B", an exception occurs because "B" was not
        included in the ProjectState that is used to detect soft-applied
        migrations (#22823).
"""
call_command("migrate", "migrated_unapplied_app", verbosity=0)
# unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble',
# but that model is only defined in a migration, so the global app
# registry never sees it and the reference is left dangling. Remove it
# to avoid problems in subsequent tests.
apps._pending_operations.pop(("migrations", "tribble"), None)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app_syncdb"]
)
def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self):
"""
For an app without migrations, editor.execute() is used for executing
the syncdb deferred SQL.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, "execute") as execute:
call_command(
"migrate", run_syncdb=True, verbosity=1, stdout=stdout, no_color=True
)
create_table_count = len(
[call for call in execute.mock_calls if "CREATE TABLE" in str(call)]
)
self.assertEqual(create_table_count, 2)
# There's at least one deferred SQL for creating the foreign key
# index.
self.assertGreater(len(execute.mock_calls), 2)
stdout = stdout.getvalue()
self.assertIn("Synchronize unmigrated apps: unmigrated_app_syncdb", stdout)
self.assertIn("Creating tables...", stdout)
table_name = truncate_name(
"unmigrated_app_syncdb_classroom", connection.ops.max_name_length()
)
self.assertIn("Creating table %s" % table_name, stdout)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate_syncdb_app_with_migrations(self):
msg = "Can't use run_syncdb with app 'migrations' as it has migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", "migrations", run_syncdb=True, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.unmigrated_app_syncdb",
"migrations.migrations_test_apps.unmigrated_app_simple",
]
)
def test_migrate_syncdb_app_label(self):
"""
Running migrate --run-syncdb with an app_label only creates tables for
the specified app.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, "execute") as execute:
call_command(
"migrate", "unmigrated_app_syncdb", run_syncdb=True, stdout=stdout
)
create_table_count = len(
[call for call in execute.mock_calls if "CREATE TABLE" in str(call)]
)
self.assertEqual(create_table_count, 2)
self.assertGreater(len(execute.mock_calls), 2)
self.assertIn(
"Synchronize unmigrated app: unmigrated_app_syncdb", stdout.getvalue()
)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_record_replaced(self):
"""
Running a single squashed migration should record all of the original
replaced migrations as run.
"""
recorder = MigrationRecorder(connection)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0001_initial"), applied_migrations)
self.assertIn(("migrations", "0002_second"), applied_migrations)
self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
# Rollback changes
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_record_squashed(self):
"""
        Running migrate for a squashed migration should record it as run
        if all of the replaced migrations have been run (#25231).
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
out = io.StringIO()
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n"
" [-] 0001_squashed_0002 (2 squashed migrations) "
"run 'manage.py migrate' to finish recording.\n",
out.getvalue().lower(),
)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
"migrations\n [x] 0001_squashed_0002 (2 squashed migrations)\n",
out.getvalue().lower(),
)
self.assertIn(
("migrations", "0001_squashed_0002"), recorder.applied_migrations()
)
# No changes were actually applied so there is nothing to rollback
def test_migrate_partially_applied_squashed_migration(self):
"""
Migrating to a squashed migration specified by name should succeed
even if it is partially applied.
"""
with self.temporary_migration_module(module="migrations.test_migrations"):
recorder = MigrationRecorder(connection)
try:
call_command("migrate", "migrations", "0001_initial", verbosity=0)
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=0,
)
call_command(
"migrate",
"migrations",
"0001_squashed_0002_second",
verbosity=0,
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0002_second"), applied_migrations)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_migrate_backward_to_squashed_migration(self):
try:
call_command("migrate", "migrations", "0001_squashed_0002", verbosity=0)
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
call_command("migrate", "migrations", "0001_initial", verbosity=0)
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_book")
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate_inconsistent_history(self):
"""
Running migrate with some migrations applied before their dependencies
should not be allowed.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial"
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("migrate")
applied_migrations = recorder.applied_migrations()
self.assertNotIn(("migrations", "0001_initial"), applied_migrations)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.migrated_app",
]
)
def test_migrate_not_reflected_changes(self):
class NewModel1(models.Model):
class Meta:
app_label = "migrated_app"
class NewModel2(models.Model):
class Meta:
app_label = "migrated_unapplied_app"
out = io.StringIO()
try:
call_command("migrate", verbosity=0)
call_command("migrate", stdout=out, no_color=True)
self.assertEqual(
"operations to perform:\n"
" apply all migrations: migrated_app, migrated_unapplied_app\n"
"running migrations:\n"
" no migrations to apply.\n"
" your models in app(s): 'migrated_app', "
"'migrated_unapplied_app' have changes that are not yet "
"reflected in a migration, and so won't be applied.\n"
" run 'manage.py makemigrations' to make new migrations, and "
"then re-run 'manage.py migrate' to apply them.\n",
out.getvalue().lower(),
)
finally:
# Unmigrate everything.
call_command("migrate", "migrated_app", "zero", verbosity=0)
call_command("migrate", "migrated_unapplied_app", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_squashed_no_replaces",
}
)
def test_migrate_prune(self):
"""
With prune=True, references to migration files deleted from the
migrations module (such as after being squashed) are removed from the
django_migrations table.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
recorder.record_applied("migrations", "0001_squashed_0002")
out = io.StringIO()
try:
call_command("migrate", "migrations", prune=True, stdout=out, no_color=True)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n"
" Pruning migrations.0001_initial OK\n"
" Pruning migrations.0002_second OK\n",
)
applied_migrations = [
migration
for migration in recorder.applied_migrations()
if migration[0] == "migrations"
]
self.assertEqual(applied_migrations, [("migrations", "0001_squashed_0002")])
finally:
recorder.record_unapplied("migrations", "0001_initial")
recorder.record_unapplied("migrations", "0001_second")
recorder.record_unapplied("migrations", "0001_squashed_0002")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_prune_deleted_squashed_migrations_in_replaces(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
) as migration_dir:
try:
call_command("migrate", "migrations", verbosity=0)
# Delete the replaced migrations.
os.remove(os.path.join(migration_dir, "0001_initial.py"))
os.remove(os.path.join(migration_dir, "0002_second.py"))
# --prune cannot be used before removing the "replaces"
# attribute.
call_command(
"migrate",
"migrations",
prune=True,
stdout=out,
no_color=True,
)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n"
" Cannot use --prune because the following squashed "
"migrations have their 'replaces' attributes and may not "
"be recorded as applied:\n"
" migrations.0001_squashed_0002\n"
" Re-run 'manage.py migrate' if they are not marked as "
"applied, and remove 'replaces' attributes in their "
"Migration classes.\n",
)
finally:
# Unmigrate everything.
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}
)
def test_prune_no_migrations_to_prune(self):
out = io.StringIO()
call_command("migrate", "migrations", prune=True, stdout=out, no_color=True)
self.assertEqual(
out.getvalue(),
"Pruning migrations:\n No migrations to prune.\n",
)
out = io.StringIO()
call_command(
"migrate",
"migrations",
prune=True,
stdout=out,
no_color=True,
verbosity=0,
)
self.assertEqual(out.getvalue(), "")
def test_prune_no_app_label(self):
msg = "Migrations can be pruned only when an app is specified."
with self.assertRaisesMessage(CommandError, msg):
call_command("migrate", prune=True)
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
def setUp(self):
super().setUp()
self._old_models = apps.app_configs["migrations"].models.copy()
def tearDown(self):
apps.app_configs["migrations"].models = self._old_models
apps.all_models["migrations"] = self._old_models
apps.clear_cache()
super().tearDown()
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model("migrations", UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
# Check for empty __init__.py file in migrations folder
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
with open(init_file) as fp:
content = fp.read()
self.assertEqual(content, "")
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding="utf-8") as fp:
content = fp.read()
self.assertIn("migrations.CreateModel", content)
self.assertIn("initial = True", content)
self.assertIn("úñí©óðé µóðéø", content) # Meta.verbose_name
self.assertIn("úñí©óðé µóðéøß", content) # Meta.verbose_name_plural
self.assertIn("ÚÑÍ¢ÓÐÉ", content) # title.verbose_name
self.assertIn("“Ðjáñgó”", content) # title.default
def test_makemigrations_order(self):
"""
makemigrations should recognize number-only migrations (0001.py).
"""
module = "migrations.test_migrations_order"
with self.temporary_migration_module(module=module) as migration_dir:
if hasattr(importlib, "invalidate_caches"):
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
importlib.invalidate_caches()
call_command(
"makemigrations", "migrations", "--empty", "-n", "a", "-v", "0"
)
self.assertTrue(os.path.exists(os.path.join(migration_dir, "0002_a.py")))
def test_makemigrations_empty_connections(self):
empty_connections = ConnectionHandler({"default": {}})
with mock.patch(
"django.core.management.commands.makemigrations.connections",
new=empty_connections,
):
# with no apps
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
# with an app
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
@override_settings(INSTALLED_APPS=["migrations", "migrations2"])
def test_makemigrations_consistency_checks_respect_routers(self):
"""
The history consistency checks in makemigrations respect
settings.DATABASE_ROUTERS.
"""
def patched_has_table(migration_recorder):
if migration_recorder.connection is connections["other"]:
raise Exception("Other connection")
else:
return mock.DEFAULT
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model("migrations", UnicodeModel)
with mock.patch.object(
MigrationRecorder, "has_table", autospec=True, side_effect=patched_has_table
) as has_table:
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
self.assertEqual(has_table.call_count, 1) # 'default' is checked
# Router says not to migrate 'other' so consistency shouldn't
# be checked.
with self.settings(DATABASE_ROUTERS=["migrations.routers.TestRouter"]):
call_command("makemigrations", "migrations", verbosity=0)
self.assertEqual(has_table.call_count, 2) # 'default' again
# With a router that doesn't prohibit migrating 'other',
# consistency is checked.
with self.settings(
DATABASE_ROUTERS=["migrations.routers.DefaultOtherRouter"]
):
with self.assertRaisesMessage(Exception, "Other connection"):
call_command("makemigrations", "migrations", verbosity=0)
self.assertEqual(has_table.call_count, 4) # 'default' and 'other'
# With a router that doesn't allow migrating on any database,
# no consistency checks are made.
with self.settings(DATABASE_ROUTERS=["migrations.routers.TestRouter"]):
with mock.patch.object(
TestRouter, "allow_migrate", return_value=False
) as allow_migrate:
call_command("makemigrations", "migrations", verbosity=0)
allow_migrate.assert_any_call(
"other", "migrations", model_name="UnicodeModel"
)
# allow_migrate() is called with the correct arguments.
self.assertGreater(len(allow_migrate.mock_calls), 0)
called_aliases = set()
for mock_call in allow_migrate.mock_calls:
_, call_args, call_kwargs = mock_call
connection_alias, app_name = call_args
called_aliases.add(connection_alias)
# Raises an error if invalid app_name/model_name occurs.
apps.get_app_config(app_name).get_model(call_kwargs["model_name"])
self.assertEqual(called_aliases, set(connections))
self.assertEqual(has_table.call_count, 4)
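    # For context: a database router such as the TestRouter used above is
    # just a class exposing an allow_migrate() hook. A minimal illustrative
    # sketch (the real routers in migrations/routers.py have more specific
    # rules):
    #
    #   class OnlyDefaultRouter:
    #       def allow_migrate(self, db, app_label, model_name=None, **hints):
    #           # Permit schema operations on the 'default' alias only.
    #           return db == "default"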
def test_failing_migration(self):
# If a migration fails to serialize, it shouldn't generate an empty file. #21280
apps.register_model("migrations", UnserializableModel)
with self.temporary_migration_module() as migration_dir:
with self.assertRaisesMessage(ValueError, "Cannot serialize"):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
def test_makemigrations_conflict_exit(self):
"""
makemigrations exits if it detects a conflict.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
):
with self.assertRaises(CommandError) as context:
call_command("makemigrations")
self.assertEqual(
str(context.exception),
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (0002_conflicting_second, 0002_second in "
"migrations).\n"
"To fix them run 'python manage.py makemigrations --merge'",
)
def test_makemigrations_merge_no_conflict(self):
"""
        makemigrations exits cleanly when run in merge mode with no conflicts.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", merge=True, stdout=out)
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
makemigrations exits if no app is specified with 'empty' mode.
"""
msg = "You must supply at least one app label when using --empty."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
makemigrations properly constructs an empty migration.
"""
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding="utf-8") as fp:
content = fp.read()
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(" ", "")
self.assertIn(
"dependencies=[]" if HAS_BLACK else "dependencies=[\n]", content
)
self.assertIn(
"operations=[]" if HAS_BLACK else "operations=[\n]", content
)
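    # For reference, the empty migration generated above looks roughly like
    # this (exact formatting depends on whether black is installed):
    #
    #   class Migration(migrations.Migration):
    #       dependencies = []
    #       operations = []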
@override_settings(MIGRATION_MODULES={"migrations": None})
def test_makemigrations_disabled_migrations_for_app(self):
"""
makemigrations raises a nice error when migrations are disabled for an
app.
"""
msg = (
"Django can't create migrations for app 'migrations' because migrations "
"have been disabled via the MIGRATION_MODULES setting."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_no_changes_no_apps(self):
"""
makemigrations exits when there are no changes and no apps are specified.
"""
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
def test_makemigrations_no_changes(self):
"""
makemigrations exits when there are no changes to an app.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_no_apps_initial(self):
"""
        makemigrations should detect that an initial migration is needed when
        the migration module is empty and no app is specified.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_empty"):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_no_init(self):
"""Migration directories without an __init__.py file are allowed."""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_init"
):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
makemigrations announces the migration at the default verbosity level.
"""
out = io.StringIO()
with self.temporary_migration_module():
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
def test_makemigrations_no_common_ancestor(self):
"""
makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
with self.temporary_migration_module(
module="migrations.test_migrations_no_ancestor"
):
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
def test_makemigrations_interactive_reject(self):
"""
makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
with mock.patch("builtins.input", mock.Mock(return_value="N")):
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
with captured_stdout():
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=True,
verbosity=0,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
def test_makemigrations_interactive_accept(self):
"""
makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_default_merge_name(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(
migration_dir,
"0003_merge_0002_conflicting_second_0002_second.py",
)
self.assertIs(os.path.exists(merge_file), True)
with open(merge_file, encoding="utf-8") as fp:
content = fp.read()
if HAS_BLACK:
target_str = '("migrations", "0002_conflicting_second")'
else:
target_str = "('migrations', '0002_conflicting_second')"
self.assertIn(target_str, content)
self.assertIn("Created new merge migration %s" % merge_file, out.getvalue())
@mock.patch("django.db.migrations.utils.datetime")
def test_makemigrations_auto_merge_name(self, mock_datetime):
mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4)
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict_long_name"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge_20160102_0304.py")
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_non_interactive_not_null_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
new not-null field.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_int = models.IntegerField()
class Meta:
app_label = "migrations"
with self.assertRaises(SystemExit):
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn(
"Field 'silly_int' on model 'sillymodel' not migrated: it is "
"impossible to add a non-nullable field without specifying a "
"default.",
out.getvalue(),
)
def test_makemigrations_interactive_not_null_addition(self):
"""
makemigrations messages when adding a NOT NULL field in interactive
mode.
"""
class Author(models.Model):
silly_field = models.BooleanField(null=False)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to add a non-nullable field 'silly_field' to "
"author without specifying a default. This is because the "
"database needs something to populate existing rows.\n"
"Please select a fix:\n"
" 1) Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)\n"
" 2) Quit and manually define a default value in models.py."
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# 2 - quit.
with mock.patch("builtins.input", return_value="2"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(input_msg, out.getvalue())
# 1 - provide a default.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
output = out.getvalue()
self.assertIn(input_msg, output)
self.assertIn("Please enter the default value as valid Python.", output)
self.assertIn(
"The datetime and django.utils.timezone modules are "
"available, so it is possible to provide e.g. timezone.now as "
"a value",
output,
)
self.assertIn("Type 'exit' to exit this prompt", output)
def test_makemigrations_non_interactive_not_null_alteration(self):
"""
Non-interactive makemigrations fails when a default is missing on a
field changed to not-null.
"""
class Author(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
age = models.IntegerField(default=0)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn("Alter field slug on author", out.getvalue())
self.assertIn(
"Field 'slug' on model 'author' given a default of NOT PROVIDED "
"and must be corrected.",
out.getvalue(),
)
def test_makemigrations_interactive_not_null_alteration(self):
"""
makemigrations messages when changing a NULL field to NOT NULL in
interactive mode.
"""
class Author(models.Model):
slug = models.SlugField(null=False)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to change a nullable field 'slug' on author to "
"non-nullable without providing a default. This is because the "
"database needs something to populate existing rows.\n"
"Please select a fix:\n"
" 1) Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)\n"
" 2) Ignore for now. Existing rows that contain NULL values will "
"have to be handled manually, for example with a RunPython or "
"RunSQL operation.\n"
" 3) Quit and manually define a default value in models.py."
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# No message appears if --dry-run.
with captured_stdout() as out:
call_command(
"makemigrations",
"migrations",
interactive=True,
dry_run=True,
)
self.assertNotIn(input_msg, out.getvalue())
# 3 - quit.
with mock.patch("builtins.input", return_value="3"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(input_msg, out.getvalue())
# 1 - provide a default.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
output = out.getvalue()
self.assertIn(input_msg, output)
self.assertIn("Please enter the default value as valid Python.", output)
self.assertIn(
"The datetime and django.utils.timezone modules are "
"available, so it is possible to provide e.g. timezone.now as "
"a value",
output,
)
self.assertIn("Type 'exit' to exit this prompt", output)
def test_makemigrations_non_interactive_no_model_rename(self):
"""
        In non-interactive mode, makemigrations treats a possible model
        rename as a model deletion plus a model creation.
"""
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Delete model SillyModel", out.getvalue())
self.assertIn("Create model RenamedModel", out.getvalue())
def test_makemigrations_non_interactive_no_field_rename(self):
"""
        In non-interactive mode, makemigrations treats a possible field
        rename as a field removal plus a field addition.
"""
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Remove field silly_field from sillymodel", out.getvalue())
self.assertIn("Add field silly_rename to sillymodel", out.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_model_rename_interactive(self, mock_input):
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(
module="migrations.test_migrations_no_default",
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
self.assertIn("Rename model SillyModel to RenamedModel", out.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_field_rename_interactive(self, mock_input):
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(
module="migrations.test_migrations_no_default",
):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
self.assertIn(
"Rename field silly_field on sillymodel to silly_rename",
out.getvalue(),
)
def test_makemigrations_handle_merge(self):
"""
makemigrations properly merges the conflicting migrations with --noinput.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertTrue(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertIn("Created new merge migration", output)
def test_makemigration_merge_dry_run(self):
"""
        makemigrations respects the --dry-run option when fixing migration
conflicts (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
dry_run=True,
merge=True,
interactive=False,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
`makemigrations --merge --dry-run` writes the merge migration file to
stdout with `verbosity == 3` (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations",
"migrations",
name="merge",
dry_run=True,
merge=True,
interactive=False,
stdout=out,
verbosity=3,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
def test_makemigrations_dry_run(self):
"""
`makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
silly_auto_now = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
def test_makemigrations_dry_run_verbosity_3(self):
"""
Allow `makemigrations --dry-run` to output the migrations file to
stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_default"
):
call_command(
"makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3
)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
def test_makemigrations_scriptable(self):
"""
With scriptable=True, log output is diverted to stderr, and only the
paths of generated migration files are written to stdout.
"""
out = io.StringIO()
err = io.StringIO()
with self.temporary_migration_module(
module="migrations.migrations.test_migrations",
) as migration_dir:
call_command(
"makemigrations",
"migrations",
scriptable=True,
stdout=out,
stderr=err,
)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertEqual(out.getvalue(), f"{initial_file}\n")
self.assertIn(" - Create model ModelWithCustomBase\n", err.getvalue())
@mock.patch("builtins.input", return_value="Y")
def test_makemigrations_scriptable_merge(self, mock_input):
out = io.StringIO()
err = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_conflict",
) as migration_dir:
call_command(
"makemigrations",
"migrations",
merge=True,
name="merge",
scriptable=True,
stdout=out,
stderr=err,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertEqual(out.getvalue(), f"{merge_file}\n")
self.assertIn(f"Created new merge migration {merge_file}", err.getvalue())
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
makemigrations creates migrations when specifying a custom location
for migration files using MIGRATION_MODULES if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar"
with self.temporary_migration_module(module=migration_module) as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
# Migrations file is actually created in the expected path.
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "some.nonexistent.path"})
def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self):
msg = (
"Could not locate an appropriate location to create migrations "
"package some.nonexistent.path. Make sure the toplevel package "
"exists and can be imported."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_interactive_by_default(self):
"""
        The user is prompted to merge by default if there are conflicts and
        merge is True. The answer is mocked as negative to differentiate this
        from the behavior when --noinput is specified.
"""
# Monkeypatch interactive questioner to auto reject
out = io.StringIO()
with mock.patch("builtins.input", mock.Mock(return_value="N")):
with self.temporary_migration_module(
module="migrations.test_migrations_conflict"
) as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", merge=True, stdout=out
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict",
]
)
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
makemigrations does not raise a CommandError when an unspecified app
has conflicting migrations.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "migrations", merge=False, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict",
]
)
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
makemigrations does not create a merge for an unspecified app even if
it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="y")):
out = io.StringIO()
with self.temporary_migration_module(
app_label="migrated_app"
) as migration_dir:
call_command(
"makemigrations",
"migrated_app",
name="merge",
merge=True,
interactive=True,
stdout=out,
)
merge_file = os.path.join(migration_dir, "0003_merge.py")
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.conflicting_app_with_dependencies",
]
)
def test_makemigrations_merge_dont_output_dependency_operations(self):
"""
        makemigrations --merge does not output operations from apps other
        than the given app.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch("builtins.input", mock.Mock(return_value="N")):
out = io.StringIO()
with mock.patch(
"django.core.management.color.supports_color", lambda *args: False
):
call_command(
"makemigrations",
"conflicting_app_with_dependencies",
merge=True,
interactive=True,
stdout=out,
)
self.assertEqual(
out.getvalue().lower(),
"merging conflicting_app_with_dependencies\n"
" branch 0002_conflicting_second\n"
" - create model something\n"
" branch 0002_second\n"
" - delete model tribble\n"
" - remove field silly_field from author\n"
" - add field rating to author\n"
" - create model book\n"
"\n"
"merging will only work if the operations printed above do not "
"conflict\n"
"with each other (working on different fields or models)\n"
"should these migration branches be merged? [y/n] ",
)
def test_makemigrations_with_custom_name(self):
"""
        makemigrations --name generates a custom migration name.
"""
with self.temporary_migration_module() as migration_dir:
def cmd(migration_count, migration_name, *args):
call_command(
"makemigrations",
"migrations",
"--verbosity",
"0",
"--name",
migration_name,
*args,
)
migration_file = os.path.join(
migration_dir, "%s_%s.py" % (migration_count, migration_name)
)
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with open(migration_file, encoding="utf-8") as fp:
content = fp.read()
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn(
"dependencies=[]" if HAS_BLACK else "dependencies=[\n]", content
)
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
if HAS_BLACK:
template_str = 'dependencies=[\n("migrations","0001_%s"),\n]'
else:
template_str = "dependencies=[\n('migrations','0001_%s'),\n]"
self.assertIn(
template_str % migration_name_0001,
content,
)
self.assertIn("operations=[]" if HAS_BLACK else "operations=[\n]", content)
def test_makemigrations_with_invalid_custom_name(self):
msg = "The migration name must be a valid Python identifier."
with self.assertRaisesMessage(CommandError, msg):
call_command(
"makemigrations", "migrations", "--name", "invalid name", "--empty"
)
def test_makemigrations_check(self):
"""
makemigrations --check should exit with a non-zero status when
there are changes to an app requiring migrations.
"""
with self.temporary_migration_module():
with self.assertRaises(SystemExit):
call_command("makemigrations", "--check", "migrations", verbosity=0)
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
call_command("makemigrations", "--check", "migrations", verbosity=0)
def test_makemigrations_migration_path_output(self):
"""
makemigrations should print the relative paths to the migrations unless
they are outside of the current tree, in which case the absolute path
should be shown.
"""
out = io.StringIO()
apps.register_model("migrations", UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(
os.path.join(migration_dir, "0001_initial.py"), out.getvalue()
)
def test_makemigrations_migration_path_output_valueerror(self):
"""
makemigrations prints the absolute path if os.path.relpath() raises a
ValueError when it's impossible to obtain a relative path, e.g. on
Windows if Django is installed on a different drive than where the
migration files are created.
"""
out = io.StringIO()
with self.temporary_migration_module() as migration_dir:
with mock.patch("os.path.relpath", side_effect=ValueError):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(os.path.join(migration_dir, "0001_initial.py"), out.getvalue())
def test_makemigrations_inconsistent_history(self):
"""
        makemigrations should raise an InconsistentMigrationHistory exception
        if some migrations are applied before their dependencies.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial"
)
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("makemigrations")
def test_makemigrations_inconsistent_history_db_failure(self):
msg = (
"Got an error checking a consistent migration history performed "
"for database connection 'default': could not connect to server"
)
with mock.patch(
"django.db.migrations.loader.MigrationLoader.check_consistent_history",
side_effect=OperationalError("could not connect to server"),
):
with self.temporary_migration_module():
with self.assertWarns(RuntimeWarning) as cm:
call_command("makemigrations", verbosity=0)
self.assertEqual(str(cm.warning), msg)
@mock.patch("builtins.input", return_value="1")
@mock.patch(
"django.db.migrations.questioner.sys.stdin",
mock.MagicMock(encoding=sys.getdefaultencoding()),
)
def test_makemigrations_auto_now_add_interactive(self, *args):
"""
makemigrations prompts the user when adding auto_now_add to an existing
model.
"""
class Entry(models.Model):
title = models.CharField(max_length=255)
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
input_msg = (
"It is impossible to add the field 'creation_date' with "
"'auto_now_add=True' to entry without providing a default. This "
"is because the database needs something to populate existing "
"rows.\n"
" 1) Provide a one-off default now which will be set on all "
"existing rows\n"
" 2) Quit and manually define a default value in models.py."
)
# Monkeypatch interactive questioner to auto accept
prompt_stdout = io.StringIO()
with self.temporary_migration_module(module="migrations.test_auto_now_add"):
call_command(
"makemigrations", "migrations", interactive=True, stdout=prompt_stdout
)
prompt_output = prompt_stdout.getvalue()
self.assertIn(input_msg, prompt_output)
self.assertIn("Please enter the default value as valid Python.", prompt_output)
self.assertIn(
"Accept the default 'timezone.now' by pressing 'Enter' or provide "
"another value.",
prompt_output,
)
self.assertIn("Type 'exit' to exit this prompt", prompt_output)
self.assertIn("Add field creation_date to entry", prompt_output)
@mock.patch("builtins.input", return_value="2")
def test_makemigrations_auto_now_add_interactive_quit(self, mock_input):
class Author(models.Model):
publishing_date = models.DateField(auto_now_add=True)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout():
with self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
def test_makemigrations_non_interactive_auto_now_add_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
        new field with auto_now_add=True.
"""
class Entry(models.Model):
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_auto_now_add"):
with self.assertRaises(SystemExit), captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
self.assertIn(
"Field 'creation_date' on model 'entry' not migrated: it is "
"impossible to add a field with 'auto_now_add=True' without "
"specifying a default.",
out.getvalue(),
)
def test_makemigrations_interactive_unique_callable_default_addition(self):
"""
makemigrations prompts the user when adding a unique field with
a callable default.
"""
class Book(models.Model):
created = models.DateTimeField(unique=True, default=timezone.now)
class Meta:
app_label = "migrations"
version = get_docs_version()
input_msg = (
f"Callable default on unique field book.created will not generate "
f"unique values upon migrating.\n"
f"Please choose how to proceed:\n"
f" 1) Continue making this migration as the first step in writing "
f"a manual migration to generate unique values described here: "
f"https://docs.djangoproject.com/en/{version}/howto/"
f"writing-migrations/#migrations-that-add-unique-fields.\n"
f" 2) Quit and edit field options in models.py.\n"
)
with self.temporary_migration_module(module="migrations.test_migrations"):
# 2 - quit.
with mock.patch("builtins.input", return_value="2"):
with captured_stdout() as out, self.assertRaises(SystemExit):
call_command("makemigrations", "migrations", interactive=True)
out_value = out.getvalue()
self.assertIn(input_msg, out_value)
self.assertNotIn("Add field created to book", out_value)
# 1 - continue.
with mock.patch("builtins.input", return_value="1"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=True)
out_value = out.getvalue()
self.assertIn(input_msg, out_value)
self.assertIn("Add field created to book", out_value)
def test_makemigrations_non_interactive_unique_callable_default_addition(self):
class Book(models.Model):
created = models.DateTimeField(unique=True, default=timezone.now)
class Meta:
app_label = "migrations"
with self.temporary_migration_module(module="migrations.test_migrations"):
with captured_stdout() as out:
call_command("makemigrations", "migrations", interactive=False)
out_value = out.getvalue()
self.assertIn("Add field created to book", out_value)
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"},
)
def test_makemigrations_continues_number_sequence_after_squash(self):
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
):
with captured_stdout() as out:
call_command(
"makemigrations",
"migrations",
interactive=False,
empty=True,
)
out_value = out.getvalue()
self.assertIn("0003_auto", out_value)
def test_makemigrations_update(self):
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
migration_file = os.path.join(migration_dir, "0002_second.py")
with open(migration_file) as fp:
initial_content = fp.read()
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
self.assertFalse(
any(
filename.startswith("0003")
for filename in os.listdir(migration_dir)
)
)
self.assertIs(os.path.exists(migration_file), False)
new_migration_file = os.path.join(
migration_dir,
"0002_delete_tribble_author_rating_modelwithcustombase_and_more.py",
)
with open(new_migration_file) as fp:
self.assertNotEqual(initial_content, fp.read())
self.assertIn(f"Deleted {migration_file}", out.getvalue())
def test_makemigrations_update_existing_name(self):
with self.temporary_migration_module(
module="migrations.test_auto_now_add"
) as migration_dir:
migration_file = os.path.join(migration_dir, "0001_initial.py")
with open(migration_file) as fp:
initial_content = fp.read()
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
self.assertIs(os.path.exists(migration_file), False)
new_migration_file = os.path.join(
migration_dir,
"0001_initial_updated.py",
)
with open(new_migration_file) as fp:
self.assertNotEqual(initial_content, fp.read())
self.assertIn(f"Deleted {migration_file}", out.getvalue())
def test_makemigrations_update_applied_migration(self):
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
with self.temporary_migration_module(module="migrations.test_migrations"):
msg = "Cannot update applied migration 'migrations.0002_second'."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_no_migration(self):
with self.temporary_migration_module(module="migrations.test_migrations_empty"):
msg = "App migrations has no migration, cannot update last migration."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_squash_migration(self):
with self.temporary_migration_module(
module="migrations.test_migrations_squashed"
):
msg = "Cannot update squash migration 'migrations.0001_squashed_0002'."
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "migrations", update=True)
def test_makemigrations_update_manual_porting(self):
with self.temporary_migration_module(
module="migrations.test_migrations_plan"
) as migration_dir:
with captured_stdout() as out:
call_command("makemigrations", "migrations", update=True)
# Previous migration exists.
previous_migration_file = os.path.join(migration_dir, "0005_fifth.py")
self.assertIs(os.path.exists(previous_migration_file), True)
# New updated migration exists.
files = [f for f in os.listdir(migration_dir) if f.startswith("0005_auto")]
updated_migration_file = os.path.join(migration_dir, files[0])
self.assertIs(os.path.exists(updated_migration_file), True)
self.assertIn(
f"Updated migration {updated_migration_file} requires manual porting.\n"
f"Previous migration {previous_migration_file} was kept and must be "
f"deleted after porting functions manually.",
out.getvalue(),
)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_makemigrations_update_dependency_migration(self):
with self.temporary_migration_module(app_label="book_app"):
msg = (
"Cannot update migration 'book_app.0001_initial' that migrations "
"'author_app.0002_alter_id' depend on."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", "book_app", update=True)
class SquashMigrationsTests(MigrationTestBase):
"""
Tests running the squashmigrations command.
"""
def test_squashmigrations_squashes(self):
"""
squashmigrations squashes migrations.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
stdout=out,
no_color=True,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_squashed_0002_second.py"
)
self.assertTrue(os.path.exists(squashed_migration_file))
self.assertEqual(
out.getvalue(),
"Will squash the following migrations:\n"
" - 0001_initial\n"
" - 0002_second\n"
"Optimizing...\n"
" Optimized from 8 operations to 2 operations.\n"
"Created new squashed migration %s\n"
" You should commit this migration but leave the old ones in place;\n"
" the new migration will be used for new installs. Once you are sure\n"
" all instances of the codebase have applied the migrations you "
"squashed,\n"
" you can delete them.\n" % squashed_migration_file,
)
def test_squashmigrations_initial_attribute(self):
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations", "migrations", "0002", interactive=False, verbosity=0
)
squashed_migration_file = os.path.join(
migration_dir, "0001_squashed_0002_second.py"
)
with open(squashed_migration_file, encoding="utf-8") as fp:
content = fp.read()
self.assertIn("initial = True", content)
def test_squashmigrations_optimizes(self):
"""
squashmigrations optimizes operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=1,
stdout=out,
)
self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue())
def test_ticket_23799_squashmigrations_no_optimize(self):
"""
squashmigrations --no-optimize doesn't optimize operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
verbosity=1,
no_optimize=True,
stdout=out,
)
self.assertIn("Skipping optimization", out.getvalue())
def test_squashmigrations_valid_start(self):
"""
squashmigrations accepts a starting migration.
"""
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
"0003",
interactive=False,
verbosity=1,
stdout=out,
)
squashed_migration_file = os.path.join(
migration_dir, "0002_second_squashed_0003_third.py"
)
with open(squashed_migration_file, encoding="utf-8") as fp:
content = fp.read()
if HAS_BLACK:
test_str = ' ("migrations", "0001_initial")'
else:
test_str = " ('migrations', '0001_initial')"
self.assertIn(test_str, content)
self.assertNotIn("initial = True", content)
out = out.getvalue()
self.assertNotIn(" - 0001_initial", out)
self.assertIn(" - 0002_second", out)
self.assertIn(" - 0003_third", out)
def test_squashmigrations_invalid_start(self):
"""
squashmigrations doesn't accept a starting migration after the ending migration.
"""
with self.temporary_migration_module(
module="migrations.test_migrations_no_changes"
):
msg = (
"The migration 'migrations.0003_third' cannot be found. Maybe "
"it comes after the migration 'migrations.0002_second'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command(
"squashmigrations",
"migrations",
"0003",
"0002",
interactive=False,
verbosity=0,
)
def test_squashed_name_with_start_migration_name(self):
"""--squashed-name specifies the new migration's name."""
squashed_name = "squashed_name"
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0001",
"0002",
squashed_name=squashed_name,
interactive=False,
verbosity=0,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_%s.py" % squashed_name
)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_without_start_migration_name(self):
"""--squashed-name also works if a start migration is omitted."""
squashed_name = "squashed_name"
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0001",
squashed_name=squashed_name,
interactive=False,
verbosity=0,
)
squashed_migration_file = os.path.join(
migration_dir, "0001_%s.py" % squashed_name
)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_exists(self):
msg = "Migration 0001_initial already exists. Use a different name."
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(CommandError, msg):
call_command(
"squashmigrations",
"migrations",
"0001",
"0002",
squashed_name="initial",
interactive=False,
verbosity=0,
)
def test_squashmigrations_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting",
) as migration_dir:
call_command(
"squashmigrations",
"migrations",
"0002",
interactive=False,
stdout=out,
no_color=True,
)
squashed_migration_file = os.path.join(
migration_dir,
"0001_squashed_0002_second.py",
)
self.assertTrue(os.path.exists(squashed_migration_file))
black_warning = ""
if HAS_BLACK:
black_warning = (
"Squashed migration couldn't be formatted using the "
'"black" command. You can call it manually.\n'
)
self.assertEqual(
out.getvalue(),
f"Will squash the following migrations:\n"
f" - 0001_initial\n"
f" - 0002_second\n"
f"Optimizing...\n"
f" No optimizations possible.\n"
f"Created new squashed migration {squashed_migration_file}\n"
f" You should commit this migration but leave the old ones in place;\n"
f" the new migration will be used for new installs. Once you are sure\n"
f" all instances of the codebase have applied the migrations you "
f"squashed,\n"
f" you can delete them.\n"
f"Manual porting required\n"
f" Your migrations contained functions that must be manually copied "
f"over,\n"
f" as we could not safely copy their implementation.\n"
f" See the comment at the top of the squashed migration for details.\n"
+ black_warning,
)
class AppLabelErrorTests(TestCase):
"""
This class inherits TestCase because MigrationTestBase uses
`available_apps = ['migrations']` which means that it's the only installed
app. 'django.contrib.auth' must be in INSTALLED_APPS for some of these
tests.
"""
nonexistent_app_error = "No installed app with label 'nonexistent_app'."
did_you_mean_auth_error = (
"No installed app with label 'django.contrib.auth'. Did you mean 'auth'?"
)
def test_makemigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "nonexistent_app", stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_makemigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "django.contrib.auth", stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_migrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("migrate", "nonexistent_app")
def test_migrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("migrate", "django.contrib.auth")
def test_showmigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("showmigrations", "nonexistent_app", stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_showmigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command("showmigrations", "django.contrib.auth", stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_sqlmigrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("sqlmigrate", "nonexistent_app", "0002")
def test_sqlmigrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("sqlmigrate", "django.contrib.auth", "0002")
def test_squashmigrations_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("squashmigrations", "nonexistent_app", "0002")
def test_squashmigrations_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("squashmigrations", "django.contrib.auth", "0002")
def test_optimizemigration_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command("optimizemigration", "nonexistent_app", "0002")
def test_optimizemigration_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command("optimizemigration", "django.contrib.auth", "0002")
class OptimizeMigrationTests(MigrationTestBase):
def test_no_optimization_possible(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0002", stdout=out, no_color=True
)
migration_file = os.path.join(migration_dir, "0002_second.py")
self.assertTrue(os.path.exists(migration_file))
call_command(
"optimizemigration",
"migrations",
"0002",
stdout=out,
no_color=True,
verbosity=0,
)
self.assertEqual(out.getvalue(), "No optimizations possible.\n")
def test_optimization(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0001", stdout=out, no_color=True
)
initial_migration_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_migration_file))
with open(initial_migration_file) as fp:
content = fp.read()
self.assertIn(
'("bool", models.BooleanField'
if HAS_BLACK
else "('bool', models.BooleanField",
content,
)
self.assertEqual(
out.getvalue(),
f"Optimizing from 4 operations to 2 operations.\n"
f"Optimized migration {initial_migration_file}\n",
)
def test_optimization_no_verbosity(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations"
) as migration_dir:
call_command(
"optimizemigration",
"migrations",
"0001",
stdout=out,
no_color=True,
verbosity=0,
)
initial_migration_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_migration_file))
with open(initial_migration_file) as fp:
content = fp.read()
self.assertIn(
'("bool", models.BooleanField'
if HAS_BLACK
else "('bool', models.BooleanField",
content,
)
self.assertEqual(out.getvalue(), "")
def test_creates_replace_migration_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting"
) as migration_dir:
call_command(
"optimizemigration", "migrations", "0003", stdout=out, no_color=True
)
optimized_migration_file = os.path.join(
migration_dir, "0003_third_optimized.py"
)
self.assertTrue(os.path.exists(optimized_migration_file))
with open(optimized_migration_file) as fp:
content = fp.read()
self.assertIn("replaces = [", content)
black_warning = ""
if HAS_BLACK:
black_warning = (
"Optimized migration couldn't be formatted using the "
'"black" command. You can call it manually.\n'
)
self.assertEqual(
out.getvalue(),
"Optimizing from 3 operations to 2 operations.\n"
"Manual porting required\n"
" Your migrations contained functions that must be manually copied over,\n"
" as we could not safely copy their implementation.\n"
" See the comment at the top of the optimized migration for details.\n"
+ black_warning
+ f"Optimized migration {optimized_migration_file}\n",
)
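    # Because the optimized migration is written to a new file that lists the
    # original in 'replaces', it behaves like a single-migration squash until
    # the original file is removed.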
def test_fails_squash_migration_manual_porting(self):
out = io.StringIO()
with self.temporary_migration_module(
module="migrations.test_migrations_manual_porting"
) as migration_dir:
version = get_docs_version()
msg = (
f"Migration will require manual porting but is already a squashed "
f"migration.\nTransition to a normal migration first: "
f"https://docs.djangoproject.com/en/{version}/topics/migrations/"
f"#squashing-migrations"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "0004", stdout=out)
optimized_migration_file = os.path.join(
migration_dir, "0004_fourth_optimized.py"
)
self.assertFalse(os.path.exists(optimized_migration_file))
self.assertEqual(
out.getvalue(), "Optimizing from 3 operations to 2 operations.\n"
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_optimizemigration_check(self):
with self.assertRaises(SystemExit):
call_command(
"optimizemigration", "--check", "migrations", "0001", verbosity=0
)
call_command("optimizemigration", "--check", "migrations", "0002", verbosity=0)
@override_settings(
INSTALLED_APPS=["migrations.migrations_test_apps.unmigrated_app_simple"],
)
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_simple' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "unmigrated_app_simple", "0001")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_clashing_prefix"},
)
def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "a")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command("optimizemigration", "migrations", "nonexistent")
bb9a228c43a0c153dd090fee0af081e9ea986f7d5e2c60f5d3c5995e9bca7c28
import datetime
import pickle
import unittest
import uuid
from collections import namedtuple
from copy import deepcopy
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import (
AutoField,
Avg,
BinaryField,
BooleanField,
Case,
CharField,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
Expression,
ExpressionList,
ExpressionWrapper,
F,
FloatField,
Func,
IntegerField,
Max,
Min,
Model,
OrderBy,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
UUIDField,
Value,
Variance,
When,
)
from django.db.models.expressions import (
Col,
Combinable,
CombinedExpression,
RawSQL,
Ref,
)
from django.db.models.functions import (
Coalesce,
Concat,
Left,
Length,
Lower,
Substr,
Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import (
Approximate,
CaptureQueriesContext,
isolate_apps,
register_lookup,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import SimpleLazyObject
from .models import (
UUID,
UUIDPK,
Company,
Employee,
Experiment,
Manager,
Number,
RemoteEmployee,
Result,
SimulationRun,
Time,
)
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.example_inc = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10),
)
cls.foobar_ltd = Company.objects.create(
name="Foobar Ltd.",
num_employees=3,
num_chairs=4,
based_in_eu=True,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20),
)
cls.max = Employee.objects.create(
firstname="Max", lastname="Mustermann", salary=30
)
cls.gmbh = Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1, ceo=cls.max
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by("name", "num_employees", "num_chairs")
def test_annotate_values_aggregate(self):
companies = (
Company.objects.annotate(
salaries=F("ceo__salary"),
)
.values("num_employees", "salaries")
.aggregate(
result=Sum(
F("salaries") + F("num_employees"), output_field=IntegerField()
),
)
)
self.assertEqual(companies["result"], 2395)
def test_annotate_values_filter(self):
companies = (
Company.objects.annotate(
foo=RawSQL("%s", ["value"]),
)
.filter(foo="value")
.order_by("name")
)
self.assertSequenceEqual(
companies,
[self.example_inc, self.foobar_ltd, self.gmbh],
)
def test_annotate_values_count(self):
companies = Company.objects.annotate(foo=RawSQL("%s", ["value"]))
self.assertEqual(companies.count(), 3)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_filtering_on_annotate_that_uses_q(self):
self.assertEqual(
Company.objects.annotate(
num_employees_check=ExpressionWrapper(
Q(num_employees__gt=3), output_field=BooleanField()
)
)
.filter(num_employees_check=True)
.count(),
2,
)
def test_filtering_on_q_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
).count(),
2,
)
def test_filtering_on_rawsql_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
RawSQL("num_employees > %s", (3,), output_field=BooleanField()),
).count(),
2,
)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on the same model object, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertSequenceEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")),
[
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
],
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2302, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 5, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 34, "name": "Test GmbH", "num_employees": 32},
],
)
def test_order_of_operations(self):
# The standard order of operations is followed.
self.company_query.update(
num_chairs=F("num_employees") + 2 * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 6900, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 9, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 96, "name": "Test GmbH", "num_employees": 32},
],
)
def test_parenthesis_priority(self):
# The order of operations can be overridden by parentheses.
self.company_query.update(
num_chairs=(F("num_employees") + 2) * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 5294600, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 15, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 1088, "name": "Test GmbH", "num_employees": 32},
],
)
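# A quick sanity check of the arithmetic in the two tests above, using
# Foobar Ltd. (num_employees=3):
#   F("num_employees") + 2 * F("num_employees")   ->  3 + 2 * 3 = 9
#   (F("num_employees") + 2) * F("num_employees") -> (3 + 2) * 3 = 15
# matching the num_chairs values asserted for that company.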
def test_update_with_fk(self):
# A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(Company.objects.update(point_of_contact=F("ceo")), 3)
self.assertQuerysetEqual(
Company.objects.all(),
["Joe Smith", "Frank Meyer", "Max Mustermann"],
lambda c: str(c.point_of_contact),
ordered=False,
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [None, None], lambda n: n.float, ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F("ceo"))
c = Company.objects.first()
c.point_of_contact = Employee.objects.create(
firstname="Guido", lastname="van Rossum"
)
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")),
["Foobar Ltd.", "Test GmbH"],
lambda c: c.name,
ordered=False,
)
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")).update(
name="foo"
)
self.assertEqual(
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname"))
.get()
.name,
"foo",
)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name=F("point_of_contact__lastname"))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
self.gmbh.num_employees = F("num_employees") + 4
self.gmbh.save()
self.gmbh.refresh_from_db()
self.assertEqual(self.gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
msg = (
"Aggregate functions are not allowed in this query "
"(num_employees=Max(Value(1)))."
)
with self.assertRaisesMessage(FieldError, msg):
Company.objects.create(
name="Company",
num_employees=Max(Value(1)),
num_chairs=1,
ceo=Employee.objects.create(
firstname="Just", lastname="Doit", salary=30
),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(pk=self.gmbh.pk)
msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
with self.assertRaisesMessage(ValueError, msg):
test_gmbh.point_of_contact = F("ceo")
test_gmbh.point_of_contact = self.gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__lastname")
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
test_gmbh.save()
def test_update_inherited_field_value(self):
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
RemoteEmployee.objects.update(adjusted_salary=F("salary") * 5)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5, ceo=self.max
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
"expressions.Company.num_employees. F() expressions can only be "
"used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F("name"))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
"expressions can only be used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
test = Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F("lastname"))
self.assertSequenceEqual(queryset, [test])
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertSequenceEqual(
Employee.objects.filter(lastname__startswith=F("firstname")),
[e2, e3] if connection.features.has_case_insensitive_like else [e2],
)
qs = Employee.objects.filter(lastname__istartswith=F("firstname")).order_by(
"pk"
)
self.assertSequenceEqual(qs, [e2, e3])
def test_ticket_18375_join_reuse(self):
# Reverse multijoin F() references and the lookup target the same join.
# Pre #18375 the F() join was generated first and the lookup couldn't
# reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent - if the "gte=1"
# was seen first, then the F() will reuse the join generated by the
# gte lookup, if F() was seen first, then it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees"),
company_ceo_set__num_chairs__gte=1,
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another case similar to the one above for F(). Now the same join
# appears in two filter kwargs: once in the lhs lookup and once in F().
# Pre #18375 the number of joins generated was random when dict
# randomization was enabled, i.e. the generated query depended on
# which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F("pk"),
pk=F("company_ceo_set__num_employees"),
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter() calls.
qs = Employee.objects.filter(company_ceo_set__num_employees=F("pk")).filter(
company_ceo_set__num_employees=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_order_by_exists(self):
mary = Employee.objects.create(
firstname="Mary", lastname="Mustermann", salary=20
)
mustermanns_by_seniority = Employee.objects.filter(
lastname="Mustermann"
).order_by(
# Order by whether the employee is the CEO of a company
Exists(Company.objects.filter(ceo=OuterRef("pk"))).desc()
)
self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])
def test_order_by_multiline_sql(self):
raw_order_by = (
RawSQL(
"""
CASE WHEN num_employees > 1000
THEN num_chairs
ELSE 0 END
""",
[],
).desc(),
RawSQL(
"""
CASE WHEN num_chairs > 1
THEN 1
ELSE 0 END
""",
[],
).asc(),
)
for qs in (
Company.objects.all(),
Company.objects.distinct(),
):
with self.subTest(qs=qs):
self.assertSequenceEqual(
qs.order_by(*raw_order_by),
[self.example_inc, self.gmbh, self.foobar_ltd],
)
def test_outerref(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
msg = (
"This queryset contains a reference to an outer query and may only "
"be used in a subquery."
)
with self.assertRaisesMessage(ValueError, msg):
inner.exists()
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
self.assertIs(outer.exists(), True)
def test_exist_single_field_output_field(self):
queryset = Company.objects.values("pk")
self.assertIsInstance(Exists(queryset).output_field, BooleanField)
def test_subquery(self):
Company.objects.filter(name="Example Inc.").update(
point_of_contact=Employee.objects.get(firstname="Joe", lastname="Smith"),
ceo=self.max,
)
Employee.objects.create(firstname="Bob", lastname="Brown", salary=40)
qs = (
Employee.objects.annotate(
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_not_point_of_contact=~Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_ceo_of_small_company=Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
is_ceo_small_2=~~Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
largest_company=Subquery(
Company.objects.order_by("-num_employees")
.filter(Q(ceo=OuterRef("pk")) | Q(point_of_contact=OuterRef("pk")))
.values("name")[:1],
output_field=CharField(),
),
)
.values(
"firstname",
"is_point_of_contact",
"is_not_point_of_contact",
"is_ceo_of_small_company",
"is_ceo_small_2",
"largest_company",
)
.order_by("firstname")
)
results = list(qs)
# Could use Coalesce(subq, Value('')) instead except for the bug in
# cx_Oracle mentioned in #23843.
bob = results[0]
if (
bob["largest_company"] == ""
and connection.features.interprets_empty_strings_as_nulls
):
bob["largest_company"] = None
self.assertEqual(
results,
[
{
"firstname": "Bob",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": None,
},
{
"firstname": "Frank",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Foobar Ltd.",
},
{
"firstname": "Joe",
"is_point_of_contact": True,
"is_not_point_of_contact": False,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": "Example Inc.",
},
{
"firstname": "Max",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Example Inc.",
},
],
)
# A less elegant way to write the same query: this uses a LEFT OUTER
# JOIN and an IS NULL inside a WHERE NOT IN, which is probably less
# efficient than EXISTS.
self.assertCountEqual(
qs.filter(is_point_of_contact=True).values("pk"),
Employee.objects.exclude(company_point_of_contact_set=None).values("pk"),
)
def test_subquery_eq(self):
qs = Employee.objects.annotate(
is_ceo=Exists(Company.objects.filter(ceo=OuterRef("pk"))),
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk")),
),
small_company=Exists(
queryset=Company.objects.filter(num_employees__lt=200),
),
).filter(is_ceo=True, is_point_of_contact=False, small_company=True)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["is_point_of_contact"],
)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["small_company"],
)
def test_subquery_sql(self):
employees = Employee.objects.all()
employees_subquery = Subquery(employees)
self.assertIs(employees_subquery.query.subquery, True)
self.assertIs(employees.query.subquery, False)
compiler = employees_subquery.query.get_compiler(connection=connection)
sql, _ = employees_subquery.as_sql(compiler, connection)
self.assertIn("(SELECT ", sql)
def test_in_subquery(self):
# This is a contrived test (and you really wouldn't write this query),
# but it is a succinct way to test the __in=Subquery() construct.
small_companies = Company.objects.filter(num_employees__lt=200).values("pk")
subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
subquery_test2 = Company.objects.filter(
pk=Subquery(small_companies.filter(num_employees=3))
)
self.assertCountEqual(subquery_test2, [self.foobar_ltd])
def test_uuid_pk_subquery(self):
u = UUIDPK.objects.create()
UUID.objects.create(uuid_fk=u)
qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values("uuid_fk__id")))
self.assertCountEqual(qs, [u])
def test_nested_subquery(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
contrived = Employee.objects.annotate(
is_point_of_contact=Subquery(
outer.filter(pk=OuterRef("pk")).values("is_point_of_contact"),
output_field=BooleanField(),
),
)
self.assertCountEqual(contrived.values_list(), outer.values_list())
def test_nested_subquery_join_outer_ref(self):
inner = Employee.objects.filter(pk=OuterRef("ceo__pk")).values("pk")
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
ceo__in=inner,
ceo__pk=OuterRef("pk"),
).values("pk"),
),
)
self.assertSequenceEqual(
qs.values_list("ceo_company", flat=True),
[self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk],
)
def test_nested_subquery_outer_ref_2(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
third = Time.objects.create(time="21:00")
SimulationRun.objects.bulk_create(
[
SimulationRun(start=first, end=second, midpoint="12:00"),
SimulationRun(start=first, end=third, midpoint="15:00"),
SimulationRun(start=second, end=first, midpoint="00:00"),
]
)
inner = Time.objects.filter(
time=OuterRef(OuterRef("time")), pk=OuterRef("start")
).values("time")
middle = SimulationRun.objects.annotate(other=Subquery(inner)).values("other")[
:1
]
outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField()))
# This is a contrived example. It exercises the double OuterRef form.
self.assertCountEqual(outer, [first, second, third])
def test_nested_subquery_outer_ref_with_autofield(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
SimulationRun.objects.create(start=first, end=second, midpoint="12:00")
inner = SimulationRun.objects.filter(start=OuterRef(OuterRef("pk"))).values(
"start"
)
middle = Time.objects.annotate(other=Subquery(inner)).values("other")[:1]
outer = Time.objects.annotate(
other=Subquery(middle, output_field=IntegerField())
)
# This exercises the double OuterRef form with AutoField as pk.
self.assertCountEqual(outer, [first, second])
def test_annotations_within_subquery(self):
Company.objects.filter(num_employees__lt=50).update(
ceo=Employee.objects.get(firstname="Frank")
)
inner = (
Company.objects.filter(ceo=OuterRef("pk"))
.values("ceo")
.annotate(total_employees=Sum("num_employees"))
.values("total_employees")
)
outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(
salary__lte=Subquery(inner)
)
self.assertSequenceEqual(
outer.order_by("-total_employees").values("salary", "total_employees"),
[
{"salary": 10, "total_employees": 2300},
{"salary": 20, "total_employees": 35},
],
)
def test_subquery_references_joined_table_twice(self):
inner = Company.objects.filter(
num_chairs__gte=OuterRef("ceo__salary"),
num_employees__gte=OuterRef("point_of_contact__salary"),
)
# Another contrived example (there is no need to have a subquery here)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_subquery_filter_by_aggregate(self):
Number.objects.create(integer=1000, float=1.2)
Employee.objects.create(salary=1000)
qs = Number.objects.annotate(
min_valuable_count=Subquery(
Employee.objects.filter(
salary=OuterRef("integer"),
)
.annotate(cnt=Count("salary"))
.filter(cnt__gt=0)
.values("cnt")[:1]
),
)
self.assertEqual(qs.get().float, 1.2)
def test_subquery_filter_by_lazy(self):
self.max.manager = Manager.objects.create(name="Manager")
self.max.save()
max_manager = SimpleLazyObject(
lambda: Manager.objects.get(pk=self.max.manager.pk)
)
qs = Company.objects.annotate(
ceo_manager=Subquery(
Employee.objects.filter(
lastname=OuterRef("ceo__lastname"),
).values("manager"),
),
).filter(ceo_manager=max_manager)
self.assertEqual(qs.get(), self.gmbh)
def test_aggregate_subquery_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
ceo_salary=Subquery(
Employee.objects.filter(
id=OuterRef("ceo_id"),
).values("salary")
),
).aggregate(
ceo_salary_gt_20=Count("pk", filter=Q(ceo_salary__gt=20)),
)
self.assertEqual(aggregate, {"ceo_salary_gt_20": 1})
# Aggregation over a subquery annotation doesn't annotate the subquery
# twice in the inner query.
sql = ctx.captured_queries[0]["sql"]
self.assertLessEqual(sql.count("SELECT"), 3)
# GROUP BY isn't required to aggregate over a query that doesn't
# contain nested aggregates.
self.assertNotIn("GROUP BY", sql)
@skipUnlessDBFeature("supports_over_clause")
def test_aggregate_rawsql_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
salary=RawSQL("SUM(num_chairs) OVER (ORDER BY num_employees)", []),
).aggregate(
count=Count("pk"),
)
self.assertEqual(aggregate, {"count": 3})
sql = ctx.captured_queries[0]["sql"]
self.assertNotIn("GROUP BY", sql)
def test_explicit_output_field(self):
class FuncA(Func):
output_field = CharField()
class FuncB(Func):
pass
expr = FuncB(FuncA())
self.assertEqual(expr.output_field, FuncA.output_field)
def test_outerref_mixed_case_table_name(self):
inner = Result.objects.filter(result_time__gte=OuterRef("experiment__assigned"))
outer = Result.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_outerref_with_operator(self):
inner = Company.objects.filter(num_employees=OuterRef("ceo__salary") + 2)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertEqual(outer.get().name, "Test GmbH")
def test_nested_outerref_with_function(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.filter(
lastname__startswith=Left(OuterRef(OuterRef("lastname")), 1),
)
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_annotation_with_outerref(self):
gmbh_salary = Company.objects.annotate(
max_ceo_salary_raise=Subquery(
Company.objects.annotate(
salary_raise=OuterRef("num_employees") + F("num_employees"),
)
.order_by("-salary_raise")
.values("salary_raise")[:1],
output_field=IntegerField(),
),
).get(pk=self.gmbh.pk)
self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)
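# Arithmetic behind the assertion above: the OuterRef resolves to the
# outer company's num_employees (Test GmbH, 32), while F("num_employees")
# is the inner company's value, maximized by Example Inc. (2300), so the
# largest salary_raise is 32 + 2300 = 2332.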
def test_annotation_with_nested_outerref(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.annotate(
outer_lastname=OuterRef(OuterRef("lastname")),
).filter(lastname__startswith=Left("outer_lastname", 1))
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_pickle_expression(self):
expr = Value(1)
expr.convert_value # populate cached property
self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)
def test_incorrect_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Employee.objects.filter(firstname=F("nope")))
def test_incorrect_joined_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Company.objects.filter(ceo__pk=F("point_of_contact__nope")))
def test_exists_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("pk")
qs1 = Employee.objects.filter(Exists(inner))
qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True)
self.assertCountEqual(qs1, qs2)
self.assertFalse(Employee.objects.exclude(Exists(inner)).exists())
self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner)))
def test_subquery_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("based_in_eu")
self.assertSequenceEqual(
Employee.objects.filter(Subquery(inner)),
[self.foobar_ltd.ceo],
)
def test_subquery_group_by_outerref_in_filter(self):
inner = (
Company.objects.annotate(
employee=OuterRef("pk"),
)
.values("employee")
.annotate(
min_num_chairs=Min("num_chairs"),
)
.values("ceo")
)
self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True)
def test_case_in_filter_if_boolean_output_field(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
qs = Employee.objects.filter(
Case(
When(Exists(is_ceo), then=True),
When(Exists(is_poc), then=True),
default=False,
output_field=BooleanField(),
),
)
self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max])
def test_boolean_expression_combined(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)),
[self.example_inc.ceo, self.foobar_ltd.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),
[self.example_inc.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)),
[self.example_inc.ceo, self.max],
)
def test_boolean_expression_combined_with_empty_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
tests = [
Exists(is_poc) & Q(),
Q() & Exists(is_poc),
Exists(is_poc) | Q(),
Q() | Exists(is_poc),
Q(Exists(is_poc)) & Q(),
Q() & Q(Exists(is_poc)),
Q(Exists(is_poc)) | Q(),
Q() | Q(Exists(is_poc)),
]
for conditions in tests:
with self.subTest(conditions):
self.assertCountEqual(Employee.objects.filter(conditions), [self.max])
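# All eight combinations above reduce to Exists(is_poc): an empty Q() acts
# as the identity for both & and |, which is why every subtest matches
# only self.max (made point of contact just before).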
def test_boolean_expression_in_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(Employee.objects.filter(Q(Exists(is_poc))), [self.max])
class IterableLookupInnerExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30)
# MySQL requires that the values calculated for expressions don't fall
# outside of the field's range, so it's inconvenient to use the values
# in the more general tests.
cls.c5020 = Company.objects.create(
name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo
)
cls.c5040 = Company.objects.create(
name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo
)
cls.c5050 = Company.objects.create(
name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo
)
cls.c5060 = Company.objects.create(
name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo
)
cls.c99300 = Company.objects.create(
name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo
)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10]))
self.assertSequenceEqual(queryset, [self.c5060])
self.assertCountEqual(
Company.objects.filter(
num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10])
),
[self.c5040, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__in=(
[F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10]
)
),
[self.c5040, self.c5050, self.c5060],
)
def test_expressions_in_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertSequenceEqual(queryset, [s1])
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertQuerysetEqual(queryset, [], ordered=False)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs"), 100)),
[self.c5020, self.c5040, self.c5050],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10)
),
[self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060, self.c99300],
)
def test_range_lookup_namedtuple(self):
EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"])
qs = Company.objects.filter(
num_employees__range=EmployeeRange(minimum=51, maximum=100),
)
self.assertSequenceEqual(qs, [self.c99300])
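# The namedtuple works because __range accepts any two-item sequence; the
# query above is equivalent to, e.g.:
#   Company.objects.filter(num_employees__range=(51, 100))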
@unittest.skipUnless(
connection.vendor == "sqlite",
"This defensive test only works on databases that don't validate parameter "
"types",
)
def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion(
self,
):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"])
self.assertQuerysetEqual(queryset, [], ordered=False)
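# Sketch of why the filter above is safe: the untrusted string is combined
# with F("num_chairs") as a bound query parameter rather than spliced into
# the SQL, so the "1)) OR ((1==1" payload is compared as data and matches
# nothing.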
def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name="Integrity testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name="Taste testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
r1 = Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
within_experiment_time = [F("experiment__start"), F("experiment__end")]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertSequenceEqual(queryset, [r1])
class FTests(SimpleTestCase):
def test_deepcopy(self):
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_deconstruct(self):
f = F("name")
path, args, kwargs = f.deconstruct()
self.assertEqual(path, "django.db.models.F")
self.assertEqual(args, (f.name,))
self.assertEqual(kwargs, {})
def test_equal(self):
f = F("name")
same_f = F("name")
other_f = F("username")
self.assertEqual(f, same_f)
self.assertNotEqual(f, other_f)
def test_hash(self):
d = {F("name"): "Bob"}
self.assertIn(F("name"), d)
self.assertEqual(d[F("name")], "Bob")
def test_not_equal_Value(self):
f = F("name")
value = Value("name")
self.assertNotEqual(f, value)
self.assertNotEqual(value, f)
class ExpressionsTests(TestCase):
def test_F_reuse(self):
f = F("id")
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith"),
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
Special characters (e.g. %, _, and \) stored in the database are
properly escaped when using a pattern lookup with an expression --
refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude")
john = Employee.objects.create(firstname="Johnny", lastname="John")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__contains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__startswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__endswith=F("lastname")),
[claude],
)
def test_insensitive_patterns_escape(self):
r"""
Special characters (e.g. %, _, and \) stored in the database are
properly escaped when using a case-insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude")
john = Employee.objects.create(firstname="Johnny", lastname="john")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__icontains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__istartswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__iendswith=F("lastname")),
[claude],
)
@isolate_apps("expressions")
class SimpleExpressionTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(Expression(), Expression())
self.assertEqual(
Expression(IntegerField()), Expression(output_field=IntegerField())
)
self.assertEqual(Expression(IntegerField()), mock.ANY)
self.assertNotEqual(Expression(IntegerField()), Expression(CharField()))
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
Expression(TestModel._meta.get_field("field")),
Expression(TestModel._meta.get_field("other_field")),
)
def test_hash(self):
self.assertEqual(hash(Expression()), hash(Expression()))
self.assertEqual(
hash(Expression(IntegerField())),
hash(Expression(output_field=IntegerField())),
)
self.assertNotEqual(
hash(Expression(IntegerField())),
hash(Expression(CharField())),
)
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
hash(Expression(TestModel._meta.get_field("field"))),
hash(Expression(TestModel._meta.get_field("other_field"))),
)
class ExpressionsNumericTests(TestCase):
@classmethod
def setUpTestData(cls):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
Number.objects.update(float=F("integer"))
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (42, 42), (1337, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_increment_value(self):
"""
We can increment a value on all objects in a queryset.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.exclude(float=F("integer")),
[(43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_decimal_expression(self):
obj = Number.objects.create(integer=0, float=1, decimal_value=Decimal("1"))
qs = Number.objects.annotate(
x=ExpressionWrapper(Value(1), output_field=DecimalField()),
).filter(Q(x=1, integer=0) & Q(x=Decimal("1")))
self.assertSequenceEqual(qs, [obj])
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(
Number.objects.filter(pk=n.pk).update(float=F("integer") + F("float") * 2),
1,
)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(
Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)
)
def test_decimal_expression(self):
n = Number.objects.create(integer=1, decimal_value=Decimal("0.5"))
n.decimal_value = F("decimal_value") - Decimal("0.4")
n.save()
n.refresh_from_db()
self.assertEqual(n.decimal_value, Decimal("0.1"))
class ExpressionOperatorTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n = Number.objects.create(integer=42, float=15.5)
cls.n1 = Number.objects.create(integer=-42, float=-15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") + 15, float=F("float") + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") - 15, float=F("float") - 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)
)
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") * 15, float=F("float") * 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") / 2, float=F("float") / 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)
)
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
def test_lefthand_modulo_null(self):
# LH modulo arithmetic with a NULL operand yields NULL.
Employee.objects.create(firstname="John", lastname="Doe", salary=None)
qs = Employee.objects.annotate(modsalary=F("salary") % 20)
self.assertIsNone(qs.get().modsalary)
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56))
Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
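# Bit-level check of the values asserted above:
#   42 & 56:   0b101010 & 0b111000 = 0b101000 = 40
#   -42 & -56: in two's complement, ...11010110 & ...11001000 = ...11000000 = -64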
def test_lefthand_bitwise_left_shift_operator(self):
Number.objects.update(integer=F("integer").bitleftshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
def test_lefthand_bitwise_right_shift_operator(self):
Number.objects.update(integer=F("integer").bitrightshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.update(integer=F("integer").bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
def test_lefthand_transformed_field_bitwise_or(self):
Employee.objects.create(firstname="Max", lastname="Mustermann")
with register_lookup(CharField, Length):
qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48))
self.assertEqual(qs.get().bitor, 58)
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") ** 2, float=F("float") ** 1.5
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)
)
def test_lefthand_bitwise_xor(self):
Number.objects.update(integer=F("integer").bitxor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26)
def test_lefthand_bitwise_xor_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.update(salary=F("salary").bitxor(48))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
def test_lefthand_bitwise_xor_right_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48)
Employee.objects.update(salary=F("salary").bitxor(None))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle doesn't support bitwise XOR."
)
def test_lefthand_bitwise_xor_not_supported(self):
msg = "Bitwise XOR is not supported in Oracle."
with self.assertRaisesMessage(NotSupportedError, msg):
Number.objects.update(integer=F("integer").bitxor(48))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(
integer=15 + F("integer"), float=42.7 + F("float")
)
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(
integer=15 - F("integer"), float=42.7 - F("float")
)
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)
)
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=15 * F("integer"), float=42.7 * F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=640 / F("integer"), float=42.7 / F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)
)
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer"))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=2 ** F("integer"), float=1.5 ** F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)
)
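# The expected values above follow directly: 2 ** 42 = 4398046511104, and
# 1.5 ** 15.5 is approximately 536.308 (the float field started at 15.5).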
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
delta5 = datetime.timedelta(days=90)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
cls.e0 = Experiment.objects.create(
name="e0",
assigned=sday,
start=stime,
end=end,
completed=end.date(),
estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(
cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)
)
cls.days_long.append(cls.e0.completed - cls.e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite.
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name="e1",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name="e2",
assigned=sday - datetime.timedelta(3),
start=stime,
end=end,
completed=end.date(),
estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name="e3",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name="e4",
assigned=sday - datetime.timedelta(10),
start=stime,
end=end,
completed=end.date(),
estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
# e5: started a month after assignment, very long duration
delay = datetime.timedelta(30)
end = stime + delay + delta5
e5 = Experiment.objects.create(
name="e5",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta5,
)
cls.deltas.append(delta5)
cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
cls.days_long.append(e5.completed - e5.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
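# Fixture summary (delay = start - midnight of the assigned day, i.e. the
# stated days plus the 12:15:30.747 time of day; duration = end - start):
#   e0: delay 0 days,  duration 0
#   e1: delay 1 day,   duration 253 ms
#   e2: delay 3 days,  duration 44 s
#   e3: delay 4 days,  duration 21 h 8 min
#   e4: delay 10 days, duration 10 days
#   e5: delay 30 days, duration 90 days
# Note that e2 and e4 deliberately have an estimated_time that differs
# from their actual duration.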
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(
end__lt=F("start") + datetime.timedelta(hours=1)
)
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F("start") + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lt=delta + F("start"))
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_subtract(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(start__gt=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__gte=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_exclude(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.exclude(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i:])
test_set = [
e.name for e in Experiment.objects.exclude(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i + 1 :])
def test_date_comparison(self):
for i, days in enumerate(self.days_long):
test_set = [
e.name
for e in Experiment.objects.filter(completed__lt=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(completed__lte=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_datetime_and_durationfield_addition_with_filter(self):
test_set = Experiment.objects.filter(end=F("start") + F("estimated_time"))
self.assertGreater(test_set.count(), 0)
self.assertEqual(
[e.name for e in test_set],
[
e.name
for e in Experiment.objects.all()
if e.end == e.start + e.estimated_time
],
)
def test_datetime_and_duration_field_addition_with_annotate_and_no_output_field(
self,
):
test_set = Experiment.objects.annotate(
estimated_end=F("start") + F("estimated_time")
)
self.assertEqual(
[e.estimated_end for e in test_set],
[e.start + e.estimated_time for e in test_set],
)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_with_annotate_and_no_output_field(self):
test_set = Experiment.objects.annotate(
calculated_duration=F("end") - F("start")
)
self.assertEqual(
[e.calculated_duration for e in test_set],
[e.end - e.start for e in test_set],
)
def test_mixed_comparisons1(self):
for i, delay in enumerate(self.delays):
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gt=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gte=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_mixed_comparisons2(self):
for i, delay in enumerate(self.delays):
delay = datetime.timedelta(delay.days)
test_set = [
e.name
for e in Experiment.objects.filter(start__lt=F("assigned") + delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(
start__lte=F("assigned") + delay + datetime.timedelta(1)
)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_update(self):
for delta in self.deltas:
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F("start") + delta, end=F("end") + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F("start") * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [
e.name
for e in Experiment.objects.filter(start=F("start") + F("estimated_time"))
]
self.assertEqual(zeros, ["e0"])
end_less = [
e.name
for e in Experiment.objects.filter(end__lt=F("start") + F("estimated_time"))
]
self.assertEqual(end_less, ["e2"])
delta_math = [
e.name
for e in Experiment.objects.filter(
end__gte=F("start") + F("estimated_time") + datetime.timedelta(hours=1)
)
]
self.assertEqual(delta_math, ["e4"])
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") + Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
def test_durationfield_multiply_divide(self):
Experiment.objects.update(scalar=2)
tests = [
(Decimal("2"), 2),
(F("scalar"), 2),
(2, 2),
(3.2, 3.2),
]
for expr, scalar in tests:
with self.subTest(expr=expr):
qs = Experiment.objects.annotate(
multiplied=ExpressionWrapper(
expr * F("estimated_time"),
output_field=DurationField(),
),
divided=ExpressionWrapper(
F("estimated_time") / expr,
output_field=DurationField(),
),
)
for experiment in qs:
self.assertEqual(
experiment.multiplied,
experiment.estimated_time * scalar,
)
self.assertEqual(
experiment.divided,
experiment.estimated_time / scalar,
)
def test_duration_expressions(self):
for delta in self.deltas:
qs = Experiment.objects.annotate(duration=F("estimated_time") + delta)
for obj in qs:
self.assertEqual(obj.duration, obj.estimated_time + delta)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=F("completed") - F("assigned"),
)
at_least_5_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=5)
)
}
self.assertEqual(at_least_5_days, {"e3", "e4", "e5"})
at_least_120_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=120)
)
}
self.assertEqual(at_least_120_days, {"e5"})
less_than_5_days = {
e.name
for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))
}
self.assertEqual(less_than_5_days, {"e0", "e1", "e2"})
queryset = Experiment.objects.annotate(
difference=F("completed") - Value(None, output_field=DateField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("completed") - Value(None, output_field=DurationField()),
output_field=DateField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("completed")
queryset = Experiment.objects.annotate(
difference=subquery - F("completed"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_case_subtraction(self):
queryset = Experiment.objects.annotate(
date_case=Case(
When(Q(name="e0"), then=F("completed")),
output_field=DateField(),
),
completed_value=Value(
self.e0.completed,
output_field=DateField(),
),
difference=F("date_case") - F("completed_value"),
).filter(difference=datetime.timedelta())
self.assertEqual(queryset.get(), self.e0)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
queryset = Time.objects.annotate(
difference=F("time") - Value(datetime.time(11, 15, 0)),
)
self.assertEqual(
queryset.get().difference,
datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345),
)
queryset = Time.objects.annotate(
difference=F("time") - Value(None, output_field=TimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Time.objects.annotate(
shifted=ExpressionWrapper(
F("time") - Value(None, output_field=DurationField()),
output_field=TimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subquery_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
subquery = Time.objects.filter(pk=OuterRef("pk")).values("time")
queryset = Time.objects.annotate(
difference=subquery - F("time"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction(self):
under_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__gt=F("end") - F("start"))
]
self.assertEqual(under_estimate, ["e2"])
over_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__lt=F("end") - F("start"))
]
self.assertEqual(over_estimate, ["e4"])
queryset = Experiment.objects.annotate(
difference=F("start") - Value(None, output_field=DateTimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") - Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("start")
queryset = Experiment.objects.annotate(
difference=subquery - F("start"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_microseconds(self):
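        # Roughly 285 years expressed in microseconds; too large for 32-bit
        # arithmetic, so this exercises 64-bit interval handling.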
delta = datetime.timedelta(microseconds=8999999999999999)
Experiment.objects.update(end=F("start") + delta)
qs = Experiment.objects.annotate(delta=F("end") - F("start"))
for e in qs:
self.assertEqual(e.delta, delta)
def test_duration_with_datetime(self):
        # Exclude e1, which has very high precision, so this can be tested on
        # all backends regardless of whether they support microsecond
        # precision.
over_estimate = (
Experiment.objects.exclude(name="e1")
.filter(
completed__gt=self.stime + F("estimated_time"),
)
.order_by("name")
)
self.assertQuerysetEqual(over_estimate, ["e3", "e4", "e5"], lambda e: e.name)
def test_duration_with_datetime_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
qs = Experiment.objects.annotate(
dt=ExpressionWrapper(
F("start") + delta,
output_field=DateTimeField(),
)
)
for e in qs:
self.assertEqual(e.dt, e.start + delta)
def test_date_minus_duration(self):
more_than_4_days = Experiment.objects.filter(
assigned__lt=F("completed") - Value(datetime.timedelta(days=4))
)
self.assertQuerysetEqual(more_than_4_days, ["e3", "e4", "e5"], lambda e: e.name)
def test_negative_timedelta_update(self):
# subtract 30 seconds, 30 minutes, 2 hours and 2 days
experiments = (
Experiment.objects.filter(name="e0")
.annotate(
start_sub_seconds=F("start") + datetime.timedelta(seconds=-30),
)
.annotate(
start_sub_minutes=F("start_sub_seconds")
+ datetime.timedelta(minutes=-30),
)
.annotate(
start_sub_hours=F("start_sub_minutes") + datetime.timedelta(hours=-2),
)
.annotate(
new_start=F("start_sub_hours") + datetime.timedelta(days=-2),
)
)
expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
# subtract 30 microseconds
experiments = experiments.annotate(
new_start=F("new_start") + datetime.timedelta(microseconds=-30)
)
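        # The original start has 747000 microseconds, so 746970 remain after
        # the 30 subtracted above.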
expected_start += datetime.timedelta(microseconds=+746970)
experiments.update(start=F("new_start"))
e0 = Experiment.objects.get(name="e0")
self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(
uuid=Value(
uuid.UUID("12345678901234567890123456789012"), output_field=UUIDField()
)
)
self.assertEqual(
UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012")
)
def test_deconstruct(self):
value = Value("name")
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value("name", output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(
kwargs["output_field"].deconstruct(), CharField().deconstruct()
)
def test_repr(self):
tests = [
(None, "Value(None)"),
("str", "Value('str')"),
(True, "Value(True)"),
(42, "Value(42)"),
(
datetime.datetime(2019, 5, 15),
"Value(datetime.datetime(2019, 5, 15, 0, 0))",
),
(Decimal("3.14"), "Value(Decimal('3.14'))"),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(repr(Value(value)), expected)
def test_equal(self):
value = Value("name")
self.assertEqual(value, Value("name"))
self.assertNotEqual(value, Value("username"))
def test_hash(self):
d = {Value("name"): "Bob"}
self.assertIn(Value("name"), d)
self.assertEqual(d[Value("name")], "Bob")
def test_equal_output_field(self):
value = Value("name", output_field=CharField())
same_value = Value("name", output_field=CharField())
other_value = Value("name", output_field=TimeField())
no_output_field = Value("name")
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_raise_empty_expressionlist(self):
msg = "ExpressionList requires at least one expression"
with self.assertRaisesMessage(ValueError, msg):
ExpressionList()
def test_compile_unresolved(self):
# This test might need to be revisited later on if #25425 is enforced.
compiler = Time.objects.all().query.get_compiler(connection=connection)
value = Value("foo")
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
value = Value("foo", output_field=CharField())
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
def test_output_field_decimalfield(self):
Time.objects.create()
time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first()
self.assertEqual(time.one, 1)
def test_resolve_output_field(self):
value_types = [
("str", CharField),
(True, BooleanField),
(42, IntegerField),
(3.14, FloatField),
(datetime.date(2019, 5, 15), DateField),
(datetime.datetime(2019, 5, 15), DateTimeField),
(datetime.time(3, 16), TimeField),
(datetime.timedelta(1), DurationField),
(Decimal("3.14"), DecimalField),
(b"", BinaryField),
(uuid.uuid4(), UUIDField),
]
for value, output_field_type in value_types:
with self.subTest(type=type(value)):
expr = Value(value)
self.assertIsInstance(expr.output_field, output_field_type)
def test_resolve_output_field_failure(self):
msg = "Cannot resolve expression type, unknown output_field"
with self.assertRaisesMessage(FieldError, msg):
Value(object()).output_field
def test_output_field_does_not_create_broken_validators(self):
"""
        The output field for a given Value doesn't get cleaned and validated;
        however, validators may still be instantiated for a given field type,
        and this demonstrates that they don't raise an exception.
"""
value_types = [
"str",
True,
42,
3.14,
datetime.date(2019, 5, 15),
datetime.datetime(2019, 5, 15),
datetime.time(3, 16),
datetime.timedelta(1),
Decimal("3.14"),
b"",
uuid.uuid4(),
]
for value in value_types:
with self.subTest(type=type(value)):
field = Value(value)._resolve_output_field()
field.clean(value, model_instance=None)
class ExistsTests(TestCase):
def test_optimizations(self):
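        # The Exists() subquery should compile to a minimal query: no selected
        # pk column, a LIMIT 1, and no ORDER BY.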
with CaptureQueriesContext(connection) as context:
list(
Experiment.objects.values(
exists=Exists(
Experiment.objects.order_by("pk"),
)
).order_by()
)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Experiment._meta.pk.column),
captured_sql,
)
self.assertIn(
connection.ops.limit_offset_sql(None, 1),
captured_sql,
)
self.assertNotIn("ORDER BY", captured_sql)
def test_negated_empty_exists(self):
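        # A negated Exists() over an empty queryset is always true, so only
        # the pk filter takes effect.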
manager = Manager.objects.create()
qs = Manager.objects.filter(~Exists(Manager.objects.none()) & Q(pk=manager.pk))
self.assertSequenceEqual(qs, [manager])
def test_select_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.annotate(
not_exists=~Exists(Manager.objects.none())
).filter(pk=manager.pk)
self.assertSequenceEqual(qs, [manager])
self.assertIs(qs.get().not_exists, True)
class FieldTransformTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
cls.ex1 = Experiment.objects.create(
name="Experiment 1",
assigned=sday,
completed=sday + datetime.timedelta(2),
estimated_time=datetime.timedelta(2),
start=stime,
end=stime + datetime.timedelta(2),
)
def test_month_aggregation(self):
self.assertEqual(
Experiment.objects.aggregate(month_count=Count("assigned__month")),
{"month_count": 1},
)
def test_transform_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("assigned__month"),
[{"assigned__month": 6}],
)
def test_multiple_transforms_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("end__date__month"),
[{"end__date__month": 6}],
)
class ReprTests(SimpleTestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>",
)
self.assertEqual(
repr(When(Q(age__gte=18), then=Value("legal"))),
"<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value('legal')>",
)
self.assertEqual(repr(Col("alias", "field")), "Col(alias, field)")
self.assertEqual(repr(F("published")), "F(published)")
self.assertEqual(
repr(F("cost") + F("tax")), "<CombinedExpression: F(cost) + F(tax)>"
)
self.assertEqual(
repr(ExpressionWrapper(F("cost") + F("tax"), IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))",
)
self.assertEqual(
repr(Func("published", function="TO_CHAR")),
"Func(F(published), function=TO_CHAR)",
)
self.assertEqual(repr(OrderBy(Value(1))), "OrderBy(Value(1), descending=False)")
self.assertEqual(repr(RawSQL("table.col", [])), "RawSQL(table.col, [])")
self.assertEqual(
repr(Ref("sum_cost", Sum("cost"))), "Ref(sum_cost, Sum(F(cost)))"
)
self.assertEqual(repr(Value(1)), "Value(1)")
self.assertEqual(
repr(ExpressionList(F("col"), F("anothercol"))),
"ExpressionList(F(col), F(anothercol))",
)
self.assertEqual(
repr(ExpressionList(OrderBy(F("col"), descending=False))),
"ExpressionList(OrderBy(F(col), descending=False))",
)
def test_functions(self):
self.assertEqual(repr(Coalesce("a", "b")), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat("a", "b")), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length("a")), "Length(F(a))")
self.assertEqual(repr(Lower("a")), "Lower(F(a))")
self.assertEqual(repr(Substr("a", 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper("a")), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg("a")), "Avg(F(a))")
self.assertEqual(repr(Count("a")), "Count(F(a))")
self.assertEqual(repr(Count("*")), "Count('*')")
self.assertEqual(repr(Max("a")), "Max(F(a))")
self.assertEqual(repr(Min("a")), "Min(F(a))")
self.assertEqual(repr(StdDev("a")), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum("a")), "Sum(F(a))")
self.assertEqual(
repr(Variance("a", sample=True)), "Variance(F(a), sample=True)"
)
def test_distinct_aggregates(self):
self.assertEqual(repr(Count("a", distinct=True)), "Count(F(a), distinct=True)")
self.assertEqual(repr(Count("*", distinct=True)), "Count('*', distinct=True)")
def test_filtered_aggregates(self):
filter = Q(a=1)
self.assertEqual(
repr(Avg("a", filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Count("a", filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Max("a", filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Min("a", filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(StdDev("a", filter=filter)),
"StdDev(F(a), filter=(AND: ('a', 1)), sample=False)",
)
self.assertEqual(
repr(Sum("a", filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Variance("a", sample=True, filter=filter)),
"Variance(F(a), filter=(AND: ('a', 1)), sample=True)",
)
self.assertEqual(
repr(Count("a", filter=filter, distinct=True)),
"Count(F(a), distinct=True, filter=(AND: ('a', 1)))",
)
class CombinableTests(SimpleTestCase):
bitwise_msg = (
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def test_negation(self):
c = Combinable()
self.assertEqual(-c, c * -1)
def test_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() & Combinable()
def test_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() | Combinable()
def test_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() ^ Combinable()
def test_reversed_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() & Combinable()
def test_reversed_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() | Combinable()
def test_reversed_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() ^ Combinable()
class CombinedExpressionTests(SimpleTestCase):
def test_resolve_output_field_number(self):
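        # Combining two numeric fields resolves to the wider type;
        # AutoField is treated as a plain IntegerField.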
tests = [
(IntegerField, AutoField, IntegerField),
(AutoField, IntegerField, IntegerField),
(IntegerField, DecimalField, DecimalField),
(DecimalField, IntegerField, DecimalField),
(IntegerField, FloatField, FloatField),
(FloatField, IntegerField, FloatField),
]
connectors = [
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
]
for lhs, rhs, combined in tests:
for connector in connectors:
with self.subTest(
lhs=lhs, connector=connector, rhs=rhs, combined=combined
):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
self.assertIsInstance(expr.output_field, combined)
def test_resolve_output_field_with_null(self):
def null():
return Value(None)
tests = [
# Numbers.
(AutoField, Combinable.ADD, null),
(DecimalField, Combinable.ADD, null),
(FloatField, Combinable.ADD, null),
(IntegerField, Combinable.ADD, null),
(IntegerField, Combinable.SUB, null),
(null, Combinable.ADD, IntegerField),
# Dates.
(DateField, Combinable.ADD, null),
(DateTimeField, Combinable.ADD, null),
(DurationField, Combinable.ADD, null),
(TimeField, Combinable.ADD, null),
(TimeField, Combinable.SUB, null),
(null, Combinable.ADD, DateTimeField),
(DateField, Combinable.SUB, null),
]
for lhs, connector, rhs in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
with self.assertRaisesMessage(FieldError, msg):
expr.output_field
def test_resolve_output_field_dates(self):
tests = [
# Add - same type.
(DateField, Combinable.ADD, DateField, FieldError),
(DateTimeField, Combinable.ADD, DateTimeField, FieldError),
(TimeField, Combinable.ADD, TimeField, FieldError),
(DurationField, Combinable.ADD, DurationField, DurationField),
# Add - different type.
(DateField, Combinable.ADD, DurationField, DateTimeField),
(DateTimeField, Combinable.ADD, DurationField, DateTimeField),
(TimeField, Combinable.ADD, DurationField, TimeField),
(DurationField, Combinable.ADD, DateField, DateTimeField),
(DurationField, Combinable.ADD, DateTimeField, DateTimeField),
(DurationField, Combinable.ADD, TimeField, TimeField),
# Subtract - same type.
(DateField, Combinable.SUB, DateField, DurationField),
(DateTimeField, Combinable.SUB, DateTimeField, DurationField),
(TimeField, Combinable.SUB, TimeField, DurationField),
(DurationField, Combinable.SUB, DurationField, DurationField),
# Subtract - different type.
(DateField, Combinable.SUB, DurationField, DateTimeField),
(DateTimeField, Combinable.SUB, DurationField, DateTimeField),
(TimeField, Combinable.SUB, DurationField, TimeField),
(DurationField, Combinable.SUB, DateField, FieldError),
(DurationField, Combinable.SUB, DateTimeField, FieldError),
            (DurationField, Combinable.SUB, TimeField, FieldError),
]
for lhs, connector, rhs, combined in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
if issubclass(combined, Exception):
with self.assertRaisesMessage(combined, msg):
expr.output_field
else:
self.assertIsInstance(expr.output_field, combined)
def test_mixed_char_date_with_annotate(self):
queryset = Experiment.objects.annotate(nonsense=F("name") + F("assigned"))
msg = (
"Cannot infer type of '+' expression involving these types: CharField, "
"DateField. You must set output_field."
)
with self.assertRaisesMessage(FieldError, msg):
list(queryset)
class ExpressionWrapperTests(SimpleTestCase):
def test_empty_group_by(self):
expr = ExpressionWrapper(Value(3), output_field=IntegerField())
self.assertEqual(expr.get_group_by_cols(alias=None), [])
def test_non_empty_group_by(self):
value = Value("f")
value.output_field = None
expr = ExpressionWrapper(Lower(value), output_field=IntegerField())
group_by_cols = expr.get_group_by_cols(alias=None)
self.assertEqual(group_by_cols, [expr.expression])
self.assertEqual(group_by_cols[0].output_field, expr.output_field)
class OrderByTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field"), nulls_last=True),
)
self.assertNotEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field")),
)
def test_hash(self):
self.assertEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"), nulls_last=True)),
)
self.assertNotEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"))),
)
def test_nulls_false(self):
        # These tests will catch ValueError in Django 5.0, when passing False to
# nulls_first and nulls_last becomes forbidden.
# msg = "nulls_first and nulls_last values must be True or None."
msg = (
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_last=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").asc(nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").desc(nulls_last=False)
|
2530162aaa01c0406e1470402eed0fad7c1bd79fea2fa7d784b62183e7a64cc8 | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
from django.db.backends.utils import strip_quotes
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# Setting all constraints to IMMEDIATE to allow changing data in the same
# transaction.
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
"; SET CONSTRAINTS ALL IMMEDIATE"
)
sql_alter_sequence_type = "ALTER SEQUENCE IF EXISTS %(sequence)s AS %(type)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_index_concurrently = (
"CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s"
# Setting the constraint to IMMEDIATE to allow changing data in the same
# transaction.
sql_create_column_inline_fk = (
"CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s"
"; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE"
)
# Setting the constraint to IMMEDIATE runs any deferred checks to allow
# dropping it in the same transaction.
sql_delete_fk = (
"SET CONSTRAINTS %(name)s IMMEDIATE; "
"ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
)
sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)"
sql_add_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD "
"GENERATED BY DEFAULT AS IDENTITY"
)
    sql_drop_identity = (
"ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS"
)
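    # DDL statements can't use query parameters, so values must be inlined as
    # fully quoted literals.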
def quote_value(self, value):
if isinstance(value, str):
value = value.replace("%", "%%")
adapted = psycopg2.extensions.adapt(value)
if hasattr(adapted, "encoding"):
adapted.encoding = "utf8"
# getquoted() returns a quoted bytestring of the adapted value.
return adapted.getquoted().decode()
def _field_indexes_sql(self, model, field):
output = super()._field_indexes_sql(model, field)
like_index_statement = self._create_like_index_sql(model, field)
if like_index_statement is not None:
output.append(like_index_statement)
return output
def _field_data_type(self, field):
if field.is_relation:
return field.rel_db_type(self.connection)
return self.connection.data_types.get(
field.get_internal_type(),
field.db_type(self.connection),
)
def _field_base_data_types(self, field):
# Yield base data types for array fields.
if field.base_field.get_internal_type() == "ArrayField":
yield from self._field_base_data_types(field.base_field)
else:
yield self._field_data_type(field.base_field)
def _create_like_index_sql(self, model, field):
"""
Return the statement to create an index with varchar operator pattern
when the column type is 'varchar' or 'text', otherwise return None.
"""
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if "[" in db_type:
return None
            # Non-deterministic collations on PostgreSQL don't support indexes
# for operator classes varchar_pattern_ops/text_pattern_ops.
if getattr(field, "db_collation", None):
return None
if db_type.startswith("varchar"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["varchar_pattern_ops"],
)
elif db_type.startswith("text"):
return self._create_index_sql(
model,
fields=[field],
suffix="_like",
opclasses=["text_pattern_ops"],
)
return None
def _using_sql(self, new_field, old_field):
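        # Return a USING clause so existing rows are cast when the column's
        # data type actually changes; an empty string means no cast is needed.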
using_sql = " USING %(column)s::%(type)s"
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
if new_internal_type == "ArrayField" and new_internal_type == old_internal_type:
# Compare base data types for array fields.
if list(self._field_base_data_types(old_field)) != list(
self._field_base_data_types(new_field)
):
return using_sql
elif self._field_data_type(old_field) != self._field_data_type(new_field):
return using_sql
return ""
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params["type"]
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
self.sql_alter_column_type += using_sql
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
        # Make ALTER TYPE with IDENTITY make sense by adding or dropping the
        # identity clause when changing to or from an auto field.
table = strip_quotes(model._meta.db_table)
auto_field_types = {
"AutoField",
"BigAutoField",
"SmallAutoField",
}
old_is_auto = old_internal_type in auto_field_types
new_is_auto = new_internal_type in auto_field_types
if new_is_auto and not old_is_auto:
column = strip_quotes(new_field.column)
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(column),
"type": new_type,
},
[],
),
[
(
self.sql_add_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(column),
},
[],
),
],
)
elif old_is_auto and not new_is_auto:
# Drop IDENTITY if exists (pre-Django 4.1 serial columns don't have
# it).
self.execute(
                self.sql_drop_identity
% {
"table": self.quote_name(table),
"column": self.quote_name(strip_quotes(new_field.column)),
}
)
column = strip_quotes(new_field.column)
sequence_name = "%s_%s_seq" % (table, column)
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
return fragment, [
(
# Drop the sequence if exists (Django 4.1+ identity columns
# don't have it).
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
),
]
elif new_is_auto and old_is_auto and old_internal_type != new_internal_type:
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
column = strip_quotes(new_field.column)
sequence_name = f"{table}_{column}_seq"
db_types = {
"AutoField": "integer",
"BigAutoField": "bigint",
"SmallAutoField": "smallint",
}
return fragment, [
# Alter the sequence type if exists (Django 4.1+ identity
# columns don't have it).
(
self.sql_alter_sequence_type
% {
"sequence": self.quote_name(sequence_name),
"type": db_types[new_internal_type],
},
[],
),
]
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
def _alter_column_collation_sql(
self, model, new_field, new_type, new_collation, old_field
):
sql = self.sql_alter_column_collate
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
sql += using_sql
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": " " + self._collate_sql(new_collation)
if new_collation
else "",
},
[],
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
super()._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
# Added an index? Create any PostgreSQL-specific indexes.
if (not (old_field.db_index or old_field.unique) and new_field.db_index) or (
not old_field.unique and new_field.unique
):
like_index_statement = self._create_like_index_sql(model, new_field)
if like_index_statement is not None:
self.execute(like_index_statement)
# Removed an index? Drop any PostgreSQL-specific indexes.
if old_field.unique and not (new_field.db_index or new_field.unique):
index_to_remove = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_to_remove))
def _index_columns(self, table, columns, col_suffixes, opclasses):
if opclasses:
return IndexColumns(
table,
columns,
self.quote_name,
col_suffixes=col_suffixes,
opclasses=opclasses,
)
return super()._index_columns(table, columns, col_suffixes, opclasses)
def add_index(self, model, index, concurrently=False):
self.execute(
index.create_sql(model, self, concurrently=concurrently), params=None
)
def remove_index(self, model, index, concurrently=False):
self.execute(index.remove_sql(model, self, concurrently=concurrently))
def _delete_index_sql(self, model, name, sql=None, concurrently=False):
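        # The sql argument is ignored here; the template is chosen from the
        # concurrently flag instead.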
sql = (
self.sql_delete_index_concurrently
if concurrently
else self.sql_delete_index
)
return super()._delete_index_sql(model, name, sql)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
concurrently=False,
include=None,
expressions=None,
):
sql = (
self.sql_create_index
if not concurrently
else self.sql_create_index_concurrently
)
return super()._create_index_sql(
model,
fields=fields,
name=name,
suffix=suffix,
using=using,
db_tablespace=db_tablespace,
col_suffixes=col_suffixes,
sql=sql,
opclasses=opclasses,
condition=condition,
include=include,
expressions=expressions,
)
|
6215a7c4d7f82acf40b8f5ab51bf83712f16b160c3d4edd6e9f743a728b709f0 | import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.core.exceptions import FieldError
from django.core.management.color import no_style
from django.db import (
DatabaseError,
DataError,
IntegrityError,
OperationalError,
connection,
)
from django.db.models import (
CASCADE,
PROTECT,
AutoField,
BigAutoField,
BigIntegerField,
BinaryField,
BooleanField,
CharField,
CheckConstraint,
DateField,
DateTimeField,
DecimalField,
DurationField,
F,
FloatField,
ForeignKey,
ForeignObject,
Index,
IntegerField,
JSONField,
ManyToManyField,
Model,
OneToOneField,
OrderBy,
PositiveIntegerField,
Q,
SlugField,
SmallAutoField,
SmallIntegerField,
TextField,
TimeField,
UniqueConstraint,
UUIDField,
Value,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper
from django.db.models.indexes import IndexExpression
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase,
ignore_warnings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.utils.deprecation import RemovedInDjango51Warning
from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField
from .models import (
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
AuthorWithIndexedName,
AuthorWithUniqueName,
AuthorWithUniqueNameAndBirthday,
Book,
BookForeignObj,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithoutAuthor,
BookWithSlug,
IntegerPK,
Node,
Note,
NoteRename,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
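            # Some backends return the default as a different Python type;
            # coerce it with cast_function before comparing.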
if cast_function and type(database_default) != type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
def get_column_collation(self, table, column):
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key, update data, and an index in the same
# transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
@isolate_apps("schema")
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = "schema"
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name("id")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(
new_field.remote_field.through, "tag_id", "schema_tag"
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(
connection
) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
"column": editor.quote_name(new_field.name),
}
self.assertFalse(
any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
)
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
)
columns = self.column_classes(Author)
self.assertEqual(
columns["age"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
        Adding a field and removing it removes all deferred SQL referring to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["surname"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
columns["surname"][1][6],
connection.features.interprets_empty_strings_as_nulls,
)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns["awesome"][0]
self.assertEqual(
field_type, connection.features.introspected_field_types["BooleanField"]
)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add the field with a default it needs to cast (to string in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns["thing"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
# Make sure the values were transformed correctly
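        # Both rows received the default {1: 2}, which get_prep_value() turns
        # into len({1: 2}) == 1.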
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
@unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b"123")
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns["bits"][0], "TextField")
@isolate_apps("schema")
def test_add_auto_field(self):
class AddAutoFieldModel(Model):
name = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(AddAutoFieldModel)
self.isolated_local_models = [AddAutoFieldModel]
old_field = AddAutoFieldModel._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
new_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.alter_field(AddAutoFieldModel, old_field, new_field)
new_auto_field = AutoField(primary_key=True)
new_auto_field.set_attributes_from_name("id")
new_auto_field.model = AddAutoFieldModel()
with connection.schema_editor() as editor:
editor.add_field(AddAutoFieldModel, new_auto_field)
# Crashes on PostgreSQL when the GENERATED BY suffix is missing.
AddAutoFieldModel.objects.create(name="test")
def test_remove_field(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with CaptureQueriesContext(connection) as ctx:
editor.remove_field(Author, Author._meta.get_field("name"))
columns = self.column_classes(Author)
self.assertNotIn("name", columns)
if getattr(connection.features, "can_alter_table_drop_column", True):
# Table is not rebuilt.
self.assertIs(
any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
self.assertIs(
any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
False,
)
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertTrue(columns["name"][1][6])
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
columns = self.column_classes(Author)
self.assertEqual(columns["name"][0], "TextField")
self.assertEqual(
bool(columns["name"][1][6]),
bool(connection.features.interprets_empty_strings_as_nulls),
)
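        # For reference, a sketch (backend-dependent, not asserted here) of
        # what the first alter typically emits on PostgreSQL:
        #   ALTER TABLE "schema_author" ALTER COLUMN "name" TYPE text
        #       USING "name"::text;
        #   ALTER TABLE "schema_author" ALTER COLUMN "name" DROP NOT NULL;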
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Now that ID is an IntegerField, the database raises an error if it
# isn't provided.
if not connection.features.supports_unspecified_pk:
with self.assertRaises(DatabaseError):
Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@isolate_apps("schema")
def test_alter_auto_field_quoted_db_column(self):
class Foo(Model):
id = AutoField(primary_key=True, db_column='"quoted_id"')
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.db_column = '"quoted_id"'
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field("uuid")
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name("uuid")
new_field.model = Author
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field("id"))
editor.alter_field(Author, old_field, new_field, strict=True)
# Redundant unique constraint is not added.
count = self.get_constraints_count(
Author._meta.db_table,
Author._meta.get_field("uuid").column,
None,
)
self.assertLessEqual(count["uniques"], 1)
@isolate_apps("schema")
def test_alter_primary_key_quoted_db_table(self):
class Foo(Model):
class Meta:
app_label = "schema"
db_table = '"foo"'
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = Foo
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
Foo.objects.create()
def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_not_null_with_default_value(self):
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("address")
new_field = TextField(blank=True, default="", null=False)
new_field.set_attributes_from_name("address")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("parent")
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name("parent")
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
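        # Django creates PostgreSQL foreign keys as DEFERRABLE INITIALLY
        # DEFERRED, so the UPDATE above leaves a constraint check pending until
        # commit; alter_field() has to drop and recreate the constraint in the
        # same transaction without tripping over that pending check.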
@isolate_apps("schema")
def test_alter_null_with_default_value_deferred_constraints(self):
class Publisher(Model):
class Meta:
app_label = "schema"
class Article(Model):
publisher = ForeignKey(Publisher, CASCADE)
title = CharField(max_length=50, null=True)
description = CharField(max_length=100, null=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Publisher)
editor.create_model(Article)
self.isolated_local_models = [Article, Publisher]
publisher = Publisher.objects.create()
Article.objects.create(publisher=publisher)
old_title = Article._meta.get_field("title")
new_title = CharField(max_length=50, null=False, default="")
new_title.set_attributes_from_name("title")
old_description = Article._meta.get_field("description")
new_description = CharField(max_length=100, null=False, default="")
new_description.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.alter_field(Article, old_title, new_title, strict=True)
editor.alter_field(Article, old_description, new_description, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05")
old_field = Note._meta.get_field("info")
new_field = DateField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="1988-05-05 3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info="3:16:17.4567")
old_field = Note._meta.get_field("info")
new_field = TimeField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns["info"][1][6])
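        # All three text-to-temporal conversions above rely on the backend
        # casting the existing string data, roughly (PostgreSQL sketch, not
        # asserted):
        #   ALTER TABLE "schema_note" ALTER COLUMN "info" TYPE time
        #       USING "info"::time;
        # which is why each test seeds a row with a parseable literal first.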
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
@skipUnlessDBFeature("interprets_empty_strings_as_nulls")
def test_alter_textual_field_not_null_to_null(self):
"""
Nullability for textual fields is preserved on databases that
interpret empty strings as NULLs.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
columns = self.column_classes(Author)
# Field is nullable.
self.assertTrue(columns["uuid"][1][6])
# Change to NOT NULL.
old_field = Author._meta.get_field("uuid")
new_field = SlugField(null=False, blank=True)
new_field.set_attributes_from_name("uuid")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
# Nullability is preserved.
self.assertTrue(columns["uuid"][1][6])
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="aaa")
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug="bbb")
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns["height"][1][6])
# Create some test data
Author.objects.create(name="Not null author", height=12)
Author.objects.create(name="Null author")
# Verify null value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertIsNone(Author.objects.get(name="Null author").height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertFalse(columns["height"][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name="Not null author").height, 12)
self.assertEqual(Author.objects.get(name="Null author").height, 42)
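        # A sketch of the usual NULL -> NOT NULL sequence (backend-dependent,
        # not asserted here): existing NULLs are backfilled with the default
        # before the constraint is tightened, e.g.
        #   UPDATE "schema_author" SET "height" = 42 WHERE "height" IS NULL;
        #   ALTER TABLE "schema_author" ALTER COLUMN "height" SET NOT NULL;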
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field("name")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_char_field_decrease_length(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
Author.objects.create(name="x" * 255)
# Change max_length of CharField.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
msg = "value too long for type character varying(254)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(Author, old_field, new_field, strict=True)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_with_custom_db_type(self):
from django.contrib.postgres.fields import ArrayField
class Foo(Model):
field = ArrayField(CharField(max_length=255))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("field")
new_field = ArrayField(CharField(max_length=16))
new_field.set_attributes_from_name("field")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=["x" * 16])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(CharField(max_length=15))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_array_field_decrease_nested_base_field_length(self):
from django.contrib.postgres.fields import ArrayField
class ArrayModel(Model):
field = ArrayField(ArrayField(CharField(max_length=16)))
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
ArrayModel.objects.create(field=[["x" * 16]])
old_field = ArrayModel._meta.get_field("field")
new_field = ArrayField(ArrayField(CharField(max_length=15)))
new_field.set_attributes_from_name("field")
new_field.model = ArrayModel
with connection.schema_editor() as editor:
msg = "value too long for type character varying(15)"
with self.assertRaisesMessage(DataError, msg):
editor.alter_field(ArrayModel, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
from django.contrib.postgres.fields import ArrayField
ci_collation = "case_insensitive"
cs_collation = "en-x-icu"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
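        # In the ICU locale above, "und" is the undetermined language and
        # "ks-level2" caps comparison strength at level 2, so case differences
        # are ignored; deterministic = false lets PostgreSQL use the collation
        # for equality checks, not just ordering.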
self.addCleanup(drop_collation)
class ArrayModel(Model):
field = ArrayField(CharField(max_length=16, db_collation=ci_collation))
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(ArrayModel)
self.isolated_local_models = [ArrayModel]
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
ci_collation,
)
# Alter collation.
old_field = ArrayModel._meta.get_field("field")
new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
new_field_cs.set_attributes_from_name("field")
        new_field_cs.model = ArrayModel
with connection.schema_editor() as editor:
editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
self.assertEqual(
self.get_column_collation(ArrayModel._meta.db_table, "field"),
cs_collation,
)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_unique_with_collation_charfield(self):
ci_collation = "case_insensitive"
def drop_collation():
with connection.cursor() as cursor:
cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")
with connection.cursor() as cursor:
cursor.execute(
f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
f"locale = 'und-u-ks-level2', deterministic = false)"
)
self.addCleanup(drop_collation)
class CiCharModel(Model):
field = CharField(max_length=16, db_collation=ci_collation, unique=True)
class Meta:
app_label = "schema"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(CiCharModel)
self.isolated_local_models = [CiCharModel]
self.assertEqual(
self.get_column_collation(CiCharModel._meta.db_table, "field"),
ci_collation,
)
self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field("info")
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns["height"][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details["foreign_key"]:
self.fail(
"Found an unexpected FK constraint to %s" % details["columns"]
)
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique anymore
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
Book.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
Book.objects.all().delete()
self.assertForeignKeyExists(Book, "author_id", "schema_author")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
columns = self.column_classes(BookWithO2O)
self.assertEqual(
columns["author_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# Ensure the field is unique now
BookWithO2O.objects.create(
author=author, title="Django 1", pub_date=datetime.datetime.now()
)
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(
author=author, title="Django 2", pub_date=datetime.datetime.now()
)
self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for
# OneToOneField.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_autofield_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
# Rename the field.
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("note_ptr")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Alter AutoField to OneToOneField.
new_field_o2o = OneToOneField(Note, CASCADE)
new_field_o2o.set_attributes_from_name("note_ptr")
new_field_o2o.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field_o2o, strict=True)
columns = self.column_classes(Author)
field_type, _ = columns["note_ptr_id"]
self.assertEqual(
field_type, connection.features.introspected_field_types["IntegerField"]
)
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
old_field = Book._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(
counts,
{"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
)
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for
# ForeignKey.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = (
1
if connection.features.supports_foreign_keys
and connection.features.can_introspect_foreign_keys
else 0
)
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
old_field = BookWithO2O._meta.get_field("author")
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field("author").column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name="Foo")
Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
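        # sequence_reset_sql() realigns the backing sequence after the explicit
        # pk=1 insert; on PostgreSQL it is roughly (sketch)
        #   SELECT setval(pg_get_serial_sequence('"schema_author"','id'),
        #       coalesce(max("id"), 1), max("id") IS NOT null)
        #   FROM "schema_author";
        # Backends whose autoincrement needs no reset return an empty list,
        # hence the guard above.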
def test_alter_autofield_pk_to_smallautofield_pk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = SmallAutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
Author.objects.create(name="Foo", pk=1)
with connection.cursor() as cursor:
sequence_reset_sqls = connection.ops.sequence_reset_sql(
no_style(), [Author]
)
if sequence_reset_sqls:
cursor.execute(sequence_reset_sqls[0])
self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToAutoField(Model):
i = AutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field("i")
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# A model representing the updated model.
class IntegerPKToBigAutoField(Model):
i = BigAutoField(primary_key=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = IntegerPK._meta.db_table
# An id (i) is generated by the database.
obj = IntegerPKToBigAutoField.objects.create(j=1)
self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
"""
        Should be able to alter a SmallIntegerField(primary_key=True) to
SmallAutoField(primary_key=True).
"""
class SmallIntegerPK(Model):
i = SmallIntegerField(primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SmallIntegerPK)
self.isolated_local_models = [SmallIntegerPK]
old_field = SmallIntegerPK._meta.get_field("i")
new_field = SmallAutoField(primary_key=True)
new_field.model = SmallIntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_serial_auto_field_to_bigautofield(self):
class SerialAutoField(Model):
id = SmallAutoField(primary_key=True)
class Meta:
app_label = "schema"
table = SerialAutoField._meta.db_table
column = SerialAutoField._meta.get_field("id").column
with connection.cursor() as cursor:
cursor.execute(
f'CREATE TABLE "{table}" '
f'("{column}" smallserial NOT NULL PRIMARY KEY)'
)
try:
old_field = SerialAutoField._meta.get_field("id")
new_field = BigAutoField(primary_key=True)
new_field.model = SerialAutoField
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(SerialAutoField, old_field, new_field, strict=True)
with connection.cursor() as cursor:
cursor.execute(
"SELECT data_type FROM pg_sequences WHERE sequencename = %s",
[f"{table}_{column}_seq"],
)
row = cursor.fetchone()
sequence_data_type = row[0] if row and row[0] else None
self.assertEqual(sequence_data_type, "bigint")
finally:
with connection.cursor() as cursor:
cursor.execute(f'DROP TABLE "{table}"')
def test_alter_int_pk_to_int_unique(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field("i")
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name("i")
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# model requires a new PK
old_field = Transitional._meta.get_field("j")
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name("j")
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = "schema"
apps = new_apps
db_table = "INTEGERPK"
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
columns = self.column_classes(Author)
self.assertEqual(
columns["display_name"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field="name")
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["detail_info"][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass(
"TagM2MTest", related_name="books", through=LocalTagThrough
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(
columns["book_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
self.assertEqual(
columns["tag_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
class LocalAuthorNoteThrough(Model):
book = ForeignKey("schema.Author", CASCADE)
tag = ForeignKey("self", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalNoteWithM2MThrough(Model):
authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalAuthorNoteThrough)
editor.create_model(LocalNoteWithM2MThrough)
# Remove the through parameter.
old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
new_field = ManyToManyField("Author")
new_field.set_attributes_from_name("authors")
msg = (
f"Cannot alter field {old_field} into {new_field} - they are not "
f"compatible types (you cannot alter to or from M2M fields, or add or "
f"remove through= on M2M fields)"
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(
columns["tagm2mtest_id"][0],
connection.features.introspected_field_types["IntegerField"],
)
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models["schema"][
new_field.remote_field.through._meta.model_name
]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass(
"schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(
LocalAuthorWithM2MThrough, old_field, new_field, strict=True
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
"tagm2mtest_id",
"schema_tagm2mtest",
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(
LocalBookWithM2M._meta.get_field("tags").remote_field.through
)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
)
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = "schema"
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = "schema"
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field("title")
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, "title1")
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details["columns"] == ["height"] and details["check"]:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(
details["columns"] == ["height"] and details["check"]
for details in constraints.values()
):
self.fail("No check constraint for height found")
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
class DurationModel(Model):
duration = DurationField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(DurationModel)
self.isolated_local_models = [DurationModel]
constraint_name = "duration_gte_5_minutes"
constraint = CheckConstraint(
check=Q(duration__gt=datetime.timedelta(minutes=5)),
name=constraint_name,
)
DurationModel._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(DurationModel, constraint)
constraints = self.get_constraints(DurationModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with self.assertRaises(IntegrityError), atomic():
DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
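        # The regression risk is in rendering the timedelta parameter into the
        # constraint body: on PostgreSQL the check becomes roughly
        #   CHECK ("duration" > INTERVAL '0:05:00')
        # (sketch, exact literal varies), while backends that store durations
        # as microsecond integers compare against 300000000 instead.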
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the custom check constraint
constraint = CheckConstraint(
check=Q(height__gte=0), name="author_height_gte_0_check"
)
custom_constraint_name = constraint.name
Author._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Alter the column to remove field check
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field check
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["height"]
and details["check"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the check constraint
with connection.schema_editor() as editor:
Author._meta.constraints = []
editor.remove_constraint(Author, constraint)
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
TagUniqueRename._meta.db_table = "unique-table"
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
finally:
with connection.schema_editor() as editor:
editor.delete_model(TagUniqueRename)
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
"""
If AlterField isn't selective about dropping foreign key constraints
when modifying a field with a unique constraint, the AlterField
incorrectly drops and recreates the Book.author foreign key even though
it doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name("name")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field("name"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
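        # The single logged statement is the max_length change itself; a
        # non-selective implementation would also log dropping and recreating
        # the Book.author foreign key, even though that constraint doesn't
        # depend on Author.name at all.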
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
class Book(Model):
tags = ManyToManyField(Tag, related_name="books")
class Meta:
app_label = "schema"
self.isolated_local_models = [Book._meta.get_field("tags").remote_field.through]
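        # The auto-created M2M through model owns the join table, so it's
        # registered for isolated cleanup; it isn't part of the module-level
        # test models.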
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name("slug")
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueName)
self.local_models = [AuthorWithUniqueName]
# Add the custom unique constraint
constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
custom_constraint_name = constraint.name
AuthorWithUniqueName._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueName, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
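        # At this point two unique constraints cover "name": the implicit one
        # created by unique=True on the field, and the named Meta constraint.
        # The alter below should drop only the implicit one.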
# Alter the column to remove field uniqueness
old_field = AuthorWithUniqueName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Alter the column to re-add field uniqueness
new_field2 = AuthorWithUniqueName._meta.get_field("name")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueName._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueName, constraint)
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, UniqueTest._meta.unique_together, []
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest, [], UniqueTest._meta.unique_together
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields are unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
editor.add_field(BookWithoutAuthor, new_field)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
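        # Book and BookWithoutAuthor are assumed to share the same db_table in
        # the schema test models, so asserting and altering through Book
        # exercises the table that was just created and extended.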
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [["author", "title"]], [])
def _test_composed_index_with_fk(self, index):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
table = Book._meta.db_table
self.assertEqual(Book._meta.indexes, [])
Book._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(Book, index)
self.assertIn(index.name, self.get_constraints(table))
Book._meta.indexes = []
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
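    # The variants below push plain, descending, functional, and
    # transform-based composed indexes through the helper above.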
def test_composed_index_with_fk(self):
index = Index(fields=["author", "title"], name="book_author_title_idx")
self._test_composed_index_with_fk(index)
def test_composed_desc_index_with_fk(self):
index = Index(fields=["-author", "title"], name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_index_with_fk(self):
index = Index(F("author"), F("title"), name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_desc_func_index_with_fk(self):
index = Index(F("author").desc(), F("title"), name="book_author_title_idx")
self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_transform_index_with_fk(self):
index = Index(F("title__lower"), name="book_title_lower_idx")
with register_lookup(CharField, Lower):
self._test_composed_index_with_fk(index)
def _test_composed_constraint_with_fk(self, constraint):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
table = Book._meta.db_table
self.assertEqual(Book._meta.constraints, [])
Book._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Book, constraint)
self.assertIn(constraint.name, self.get_constraints(table))
Book._meta.constraints = []
with connection.schema_editor() as editor:
editor.remove_constraint(Book, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
def test_composed_constraint_with_fk(self):
constraint = UniqueConstraint(
fields=["author", "title"],
name="book_author_title_uniq",
)
self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_composed_check_constraint_with_fk(self):
constraint = CheckConstraint(check=Q(author__gt=0), name="book_author_check")
self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorWithUniqueNameAndBirthday)
self.local_models = [AuthorWithUniqueNameAndBirthday]
# Add the custom unique constraint
constraint = UniqueConstraint(
fields=["name", "birthday"], name="author_name_birthday_uniq"
)
custom_constraint_name = constraint.name
AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
# Ensure the constraints exist
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Remove unique together
unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, unique_together, []
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add unique together
with connection.schema_editor() as editor:
editor.alter_unique_together(
AuthorWithUniqueNameAndBirthday, [], unique_together
)
constraints = self.get_constraints(
AuthorWithUniqueNameAndBirthday._meta.db_table
)
self.assertIn(custom_constraint_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["unique"]
and name != custom_constraint_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the unique constraint
with connection.schema_editor() as editor:
AuthorWithUniqueNameAndBirthday._meta.constraints = []
editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(fields=["name"], name="name_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIs(sql.references_table(table), True)
self.assertIs(sql.references_column(table, "name"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC"])
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_unique_constraint(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Upper("title"),
Lower("slug"),
name="func_upper_lower_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains database functions.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
sql = str(sql)
self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_unique_constraint_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("height").desc(),
"uuid",
Lower("name").asc(),
name="func_f_lower_field_unq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
constraints = self.get_constraints(table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "uuid"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
def test_func_unique_constraint_partial(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_cond_weight_uq",
condition=Q(weight__isnull=False),
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"WHERE %s IS NOT NULL" % editor.quote_name("weight"),
str(sql),
)
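        # The condition renders as a WHERE clause on the underlying unique
        # index, so rows with a NULL weight fall outside the uniqueness check.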
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
def test_func_unique_constraint_covering(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
Upper("name"),
name="func_upper_covering_uq",
include=["weight", "height"],
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertEqual(
constraints[constraint.name]["columns"],
[None, "weight", "height"],
)
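        # Introspection reports the expression member as None: only the two
        # INCLUDE-d columns have names, while the UPPER("name") key is an
        # expression rather than a plain column.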
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
self.assertIs(sql.references_column(table, "height"), True)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
self.assertIn(
"INCLUDE (%s, %s)"
% (
editor.quote_name("weight"),
editor.quote_name("height"),
),
str(sql),
)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
constraint = UniqueConstraint(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_uq",
)
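            # With Lower and Abs registered as lookups, F("name__lower") and
            # F("weight__abs") resolve to expressions over the underlying
            # columns (hence the references_column assertions below).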
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
sql = constraint.create_sql(Author, editor)
table = Author._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Author, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
constraint = UniqueConstraint(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_uq",
)
# Add constraint.
with connection.schema_editor() as editor:
editor.add_constraint(BookWithSlug, constraint)
sql = constraint.create_sql(BookWithSlug, editor)
table = BookWithSlug._meta.db_table
constraints = self.get_constraints(table)
self.assertIn(constraint.name, constraints)
self.assertIs(constraints[constraint.name]["unique"], True)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(BookWithSlug, constraint)
self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
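        # Nondeterministic expressions such as Random() can't back a unique
        # index, so the backend is expected to reject the DDL.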
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure the fields aren't indexed together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [["author", "title"]])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [["author", "title"]], [])
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
class TagIndexed(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = "schema"
index_together = [["slug", "title"]]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
self.isolated_local_models = [TagIndexed]
# Ensure there is an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c["columns"] == ["slug", "title"]
),
True,
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
@isolate_apps("schema")
def test_remove_index_together_does_not_remove_meta_indexes(self):
class AuthorWithIndexedNameAndBirthday(Model):
name = CharField(max_length=255)
birthday = DateField()
class Meta:
app_label = "schema"
index_together = [["name", "birthday"]]
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedNameAndBirthday)
self.isolated_local_models = [AuthorWithIndexedNameAndBirthday]
# Add the custom index
index = Index(fields=["name", "birthday"], name="author_name_birthday_idx")
custom_index_name = index.name
AuthorWithIndexedNameAndBirthday._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedNameAndBirthday, index)
# Ensure the indexes exist
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Remove index together
index_together = AuthorWithIndexedNameAndBirthday._meta.index_together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, index_together, []
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 0)
# Re-add index together
with connection.schema_editor() as editor:
editor.alter_index_together(
AuthorWithIndexedNameAndBirthday, [], index_together
)
constraints = self.get_constraints(
AuthorWithIndexedNameAndBirthday._meta.db_table
)
self.assertIn(custom_index_name, constraints)
other_constraints = [
name
for name, details in constraints.items()
if details["columns"] == ["name", "birthday"]
and details["index"]
and name != custom_index_name
]
self.assertEqual(len(other_constraints), 1)
# Drop the index
with connection.schema_editor() as editor:
AuthorWithIndexedNameAndBirthday._meta.indexes = []
editor.remove_index(AuthorWithIndexedNameAndBirthday, index)
@isolate_apps("schema")
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Alter the table
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(
columns["name"][0],
connection.features.introspected_field_types["CharField"],
)
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=["name"], name="author_title_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
self.local_models = [AuthorWithIndexedName]
# Ensure the table has its index
self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=["-name"], name="author_name_idx")
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=("name",),
)
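            # _create_index_name() is the schema editor's (private) helper for
            # deriving autogenerated index names; computing it here lets the
            # test assert on the db_index=True index by name.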
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorWithIndexedName, old_field, new_field, strict=True
)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Indexes can be defined with per-column ordering (ASC/DESC).
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
        # Add a unique column and verify that it creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
        # Remove the unique constraint and check that the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_uniques(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
def _index_expressions_wrappers(self):
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name").desc(), name="func_lower_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains a database function.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_f(self):
with connection.schema_editor() as editor:
editor.create_model(Tag)
index = Index("slug", F("title").desc(), name="func_f_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Tag, index)
sql = index.create_sql(Tag, editor)
table = Tag._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
# SQL contains columns.
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIs(sql.references_column(table, "title"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Tag, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_lookups(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
index = Index(
F("name__lower"),
F("weight__abs"),
name="func_lower_abs_lookup_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains database functions.
self.assertIs(sql.references_column(table, "name"), True)
sql = str(sql)
self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_composite_func_index_field_and_expression(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
index = Index(
F("author").desc(),
Lower("title").asc(),
"pub_date",
name="func_f_lower_field_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Book, index)
sql = index.create_sql(Book, editor)
table = Book._meta.db_table
constraints = self.get_constraints(table)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
self.assertEqual(len(constraints[index.name]["columns"]), 3)
self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
# SQL contains database functions and columns.
self.assertIs(sql.references_column(table, "author_id"), True)
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "pub_date"), True)
self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@isolate_apps("schema")
def test_func_index_f_decimalfield(self):
class Node(Model):
value = DecimalField(max_digits=5, decimal_places=2)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
index = Index(F("value"), name="func_f_decimalfield_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Node, index)
sql = index.create_sql(Node, editor)
table = Node._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "value"), True)
# SQL doesn't contain casting.
self.assertNotIn("CAST", str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Node, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_cast(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Cast("weight", FloatField()), name="func_cast_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "weight"), True)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
        table = BookWithSlug._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
            editor.remove_index(BookWithSlug, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
@skipIfDBFeature("collate_as_index_expression")
def test_func_index_collate_f_ordered(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(
Collate(F("name").desc(), collation=collation),
name="func_collate_f_desc_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "name"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_calc(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
# Add index.
with connection.schema_editor() as editor:
editor.add_index(Author, index)
sql = index.create_sql(Author, editor)
table = Author._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
# SQL contains columns and expressions.
self.assertIs(sql.references_column(table, "height"), True)
self.assertIs(sql.references_column(table, "weight"), True)
sql = str(sql)
self.assertIs(
sql.index(editor.quote_name("height"))
< sql.index("/")
< sql.index(editor.quote_name("weight"))
< sql.index("+")
< sql.index("5"),
True,
)
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index("field__some_key", name="func_json_key_idx")
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
@isolate_apps("schema")
def test_func_index_json_key_transform_cast(self):
class JSONModel(Model):
field = JSONField()
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(JSONModel)
self.isolated_local_models = [JSONModel]
index = Index(
Cast(KeyTextTransform("some_key", "field"), IntegerField()),
name="func_json_key_cast_idx",
)
with connection.schema_editor() as editor:
editor.add_index(JSONModel, index)
sql = index.create_sql(JSONModel, editor)
table = JSONModel._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
self.assertIs(sql.references_column(table, "field"), True)
with connection.schema_editor() as editor:
editor.remove_index(JSONModel, index)
self.assertNotIn(index.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_index_unsupported(self):
# Index is ignored on databases that don't support indexes on
# expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(F("name"), name="random_idx")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_index(Author, index))
self.assertIsNone(editor.remove_index(Author, index))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nonexistent_field(self):
index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
index = Index(Random(), name="func_random_idx")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_index(Author, index)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
"id",
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
def test_alter_primary_key_the_same_name(self):
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=2, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@skipIfDBFeature("can_rollback_ddl")
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(
editor.sql_create_table % {"table": "foo", "definition": ""}
)
@skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name(
"author_foreign_key_with_really_long_field_name_id"
)
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
        # Add a second FK; before the fix this would fail due to the long
        # reference name
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = "schema"
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
self.isolated_local_models = [Author]
if connection.vendor == "mysql":
self.assertForeignKeyExists(
Book, "author_id", '"table_author_double_quoted"'
)
else:
self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
        Tries creating a model's table, and then deleting it, when the table
        has an SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail(
"Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e
)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs["db_column"] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = "CamelCaseIndex"
expected_constraint_name = identifier_converter(constraint_name)
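            # identifier_converter() normalizes the name to how the backend
            # stores identifiers (e.g. Oracle upper-cases unquoted names), so
            # the assertions below match introspected constraint names.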
editor.execute(
editor.sql_create_index
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
"include": "",
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
constraint_name = "CamelCaseUniqConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(editor._create_unique_sql(model, [field], constraint_name))
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
constraint_name = "CamelCaseFKConstraint"
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk
% {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
editor.alter_field(
model,
get_field(Author, CASCADE, field_class=ForeignKey),
field,
strict=True,
)
self.assertNotIn(
expected_constraint_name, self.get_constraints(model._meta.db_table)
)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(
item[0],
None if connection.features.interprets_empty_strings_as_nulls else "",
)
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default="surname default")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], "surname default")
# And that the default is no longer set in the database.
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
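        # The underlying strategy, sketched (backend-dependent, illustrative):
        # add the column with a temporary database default so existing rows
        # get a value, then drop that default, e.g.
        #   ALTER TABLE "schema_author" ADD COLUMN "surname" varchar(15)
        #       DEFAULT 'surname default';
        #   ALTER TABLE "schema_author" ALTER COLUMN "surname" DROP DEFAULT;
        # after which defaults are applied by the ORM, not the database.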
def test_add_field_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable CharField with a default.
new_field = CharField(max_length=15, blank=True, null=True, default="surname")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "surname"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_add_textfield_default_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add new nullable TextField with a default.
new_field = TextField(blank=True, null=True, default="text")
new_field.set_attributes_from_name("description")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
Author.objects.create(name="Anonymous1")
with connection.cursor() as cursor:
cursor.execute("SELECT description FROM schema_author;")
item = cursor.fetchall()[0]
self.assertIsNone(item[0])
field = next(
f
for f in connection.introspection.get_table_description(
cursor,
"schema_author",
)
if f.name == "description"
)
# Field is still nullable.
self.assertTrue(field.null_ok)
# The database default is no longer set.
if connection.features.can_introspect_default:
self.assertIn(field.default, ["NULL", None])
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field("height")
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f
for f in connection.introspection.get_table_description(
cursor, "schema_author"
)
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_doesnt_perform_queries(self):
"""
No queries are performed if a field default changes and the field's
not changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(
AuthorWithDefaultHeight, old_field, new_field, strict=True
)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_fk_attributes_noop(self):
"""
No queries are performed when changing field attributes that don't
affect the schema.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_field = Book._meta.get_field("author")
new_field = ForeignKey(
Author,
blank=True,
editable=False,
error_messages={"invalid": "error message"},
help_text="help text",
limit_choices_to={"limit": "choice"},
on_delete=PROTECT,
related_name="related_name",
related_query_name="related_query_name",
validators=[lambda x: x],
verbose_name="verbose name",
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Book, new_field, old_field, strict=True)
def test_alter_field_choices_noop(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(
choices=(("Jane", "Jane"), ("Joe", "Joe")),
max_length=255,
)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, old_field, new_field, strict=True)
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes: a regular one and one for LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851",
"schema_author_nom_de_plume_7570a851_like",
],
)
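    # Editorial note (not Django source): the extra "_like" index exists
    # because Django's PostgreSQL backend creates a second index using
    # varchar_pattern_ops/text_pattern_ops on char/text columns, so that
    # LIKE 'prefix%' lookups can also use an index.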
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name("nom_de_plume")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
        # Should create two indexes: the unique constraint and one for LIKE lookups.
self.assertEqual(
self.get_constraints_for_column(Author, "nom_de_plume"),
[
"schema_author_nom_de_plume_7570a851_like",
"schema_author_nom_de_plume_key",
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "name"),
["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field("info")
new_field = TextField(db_index=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, "info"),
["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"), []
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field("title")
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, "title"),
["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to add db_index=True
old_field = Tag._meta.get_field("slug")
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, "slug"),
["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
self, mocked_datetime, mocked_tz
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(
month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc
)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
        dtob_auto_now_add = DateTimeField(auto_now_add=True)
        dtob_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
        self.check_added_field_default(
            editor,
            Author,
            dtob_auto_now_add,
            "dtob_auto_now_add",
            now,
        )
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
|
b9acc54c22a693ff7ecd48838ae4d216f5502973c1df2ad83b47026004959443 | import argparse
import ctypes
import faulthandler
import io
import itertools
import logging
import multiprocessing
import os
import pickle
import random
import sys
import textwrap
import unittest
import warnings
from collections import defaultdict
from contextlib import contextmanager
from importlib import import_module
from io import StringIO
import sqlparse
import django
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import NullTimeKeeper, TimeKeeper, iter_test_cases
from django.test.utils import setup_databases as _setup_databases
from django.test.utils import setup_test_environment
from django.test.utils import teardown_databases as _teardown_databases
from django.test.utils import teardown_test_environment
from django.utils.crypto import new_hash
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango50Warning
try:
import ipdb as pdb
except ImportError:
import pdb
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger("django.db.backends")
self.logger.setLevel(logging.DEBUG)
self.debug_sql_stream = None
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
if self.debug_sql_stream is None:
            # Error before any test started, e.g. in setUpTestData().
sql = ""
else:
self.debug_sql_stream.seek(0)
sql = self.debug_sql_stream.read()
self.errors[-1] = self.errors[-1] + (sql,)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = (
self.failures
if issubclass(err[0], test.failureException)
else self.errors
)
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln(self.separator2)
self.stream.writeln(
sqlparse.format(sql_debug, reindent=True, keyword_case="upper")
)
class PDBDebugResult(unittest.TextTestResult):
"""
Custom result class that triggers a PDB session when an error or failure
occurs.
"""
def addError(self, test, err):
super().addError(test, err)
self.debug(err)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug(err)
def addSubTest(self, test, subtest, err):
if err is not None:
self.debug(err)
super().addSubTest(test, subtest, err)
def debug(self, error):
self._restoreStdout()
self.buffer = False
exc_type, exc_value, traceback = error
print("\nOpening PDB: %r" % exc_value)
pdb.post_mortem(traceback)
class DummyList:
"""
Dummy list class for faking storage of results in unittest.TestResult.
"""
__slots__ = ()
def append(self, item):
pass
class RemoteTestResult(unittest.TestResult):
"""
Extend unittest.TestResult to record events in the child processes so they
can be replayed in the parent process. Events include things like which
tests succeeded or failed.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Fake storage of results to reduce memory usage. These are used by the
# unittest default methods, but here 'events' is used instead.
dummy_list = DummyList()
self.failures = dummy_list
self.errors = dummy_list
self.skipped = dummy_list
self.expectedFailures = dummy_list
self.unexpectedSuccesses = dummy_list
if tblib is not None:
tblib.pickling_support.install()
self.events = []
def __getstate__(self):
        # Make this class picklable by removing the file-like buffer
        # attributes. This is possible since they aren't used once the result
        # has been pickled and sent to ParallelTestSuite.
state = self.__dict__.copy()
state.pop("_stdout_buffer", None)
state.pop("_stderr_buffer", None)
state.pop("_original_stdout", None)
state.pop("_original_stderr", None)
return state
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
        the parent process. If it can't be, let the exception propagate.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print(
"""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(
test, subtest, pickle_exc
)
)
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(
original_exc_txt, 75, initial_indent=" ", subsequent_indent=" "
)
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(
pickle_exc_txt, 75, initial_indent=" ", subsequent_indent=" "
)
if tblib is None:
print(
"""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
python -m pip install tblib
""".format(
test, original_exc_txt
)
)
else:
print(
"""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(
test, original_exc_txt, pickle_exc_txt
)
)
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def startTestRun(self):
super().startTestRun()
self.events.append(("startTestRun",))
def stopTestRun(self):
super().stopTestRun()
self.events.append(("stopTestRun",))
def startTest(self, test):
super().startTest(test)
self.events.append(("startTest", self.test_index))
def stopTest(self, test):
super().stopTest(test)
self.events.append(("stopTest", self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(("addError", self.test_index, err))
super().addError(test, err)
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(("addFailure", self.test_index, err))
super().addFailure(test, err)
def addSubTest(self, test, subtest, err):
# Follow Python's implementation of unittest.TestResult.addSubTest() by
# not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(("addSubTest", self.test_index, subtest, err))
super().addSubTest(test, subtest, err)
def addSuccess(self, test):
self.events.append(("addSuccess", self.test_index))
super().addSuccess(test)
def addSkip(self, test, reason):
self.events.append(("addSkip", self.test_index, reason))
super().addSkip(test, reason)
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(("addExpectedFailure", self.test_index, err))
super().addExpectedFailure(test, err)
def addUnexpectedSuccess(self, test):
self.events.append(("addUnexpectedSuccess", self.test_index))
super().addUnexpectedSuccess(test)
def wasSuccessful(self):
"""Tells whether or not this result was a success."""
failure_types = {"addError", "addFailure", "addSubTest", "addUnexpectedSuccess"}
return all(e[0] not in failure_types for e in self.events)
def _exc_info_to_string(self, err, test):
# Make this method no-op. It only powers the default unittest behavior
# for recording errors, but this class pickles errors into 'events'
# instead.
return ""
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None, buffer=False):
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
test(result)
return result
def get_max_test_processes():
"""
The maximum number of test processes when using the --parallel option.
"""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork() or spawn().
if multiprocessing.get_start_method() not in {"fork", "spawn"}:
return 1
try:
return int(os.environ["DJANGO_TEST_PROCESSES"])
except KeyError:
return multiprocessing.cpu_count()
def parallel_type(value):
"""Parse value passed to the --parallel option."""
if value == "auto":
return value
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError(
f"{value!r} is not an integer or the string 'auto'"
)
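# Editorial sketch (not part of Django): how a --parallel option value
# resolves to a process count using the two helpers above. The helper name is
# made up.
def _example_resolve_parallel(value):
    parsed = parallel_type(value)  # an int, or the literal string "auto"
    if parsed == "auto":
        return get_max_test_processes()
    return parsed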
_worker_id = 0
def _init_worker(
counter,
initial_settings=None,
serialized_contents=None,
process_setup=None,
process_setup_args=None,
debug_mode=None,
):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
start_method = multiprocessing.get_start_method()
if start_method == "spawn":
if process_setup and callable(process_setup):
if process_setup_args is None:
process_setup_args = ()
process_setup(*process_setup_args)
django.setup()
setup_test_environment(debug=debug_mode)
for alias in connections:
connection = connections[alias]
if start_method == "spawn":
# Restore initial settings in spawned processes.
connection.settings_dict.update(initial_settings[alias])
if value := serialized_contents.get(alias):
connection._test_serialized_contents = value
connection.creation.setup_worker_connection(_worker_id)
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast, buffer = args
runner = runner_class(failfast=failfast, buffer=buffer)
result = runner.run(subsuite)
return subsuite_index, result.events
def _process_setup_stub(*args):
"""Stub method to simplify run() implementation."""
pass
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
process_setup = _process_setup_stub
process_setup_args = ()
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(
self, subsuites, processes, failfast=False, debug_mode=False, buffer=False
):
self.subsuites = subsuites
self.processes = processes
self.failfast = failfast
self.debug_mode = debug_mode
self.buffer = buffer
self.initial_settings = None
self.serialized_contents = None
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
self.initialize_suite()
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[
counter,
self.initial_settings,
self.serialized_contents,
self.process_setup.__func__,
self.process_setup_args,
self.debug_mode,
],
)
args = [
(self.runner_class, index, subsuite, self.failfast, self.buffer)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
def __iter__(self):
return iter(self.subsuites)
def initialize_suite(self):
if multiprocessing.get_start_method() == "spawn":
self.initial_settings = {
alias: connections[alias].settings_dict for alias in connections
}
self.serialized_contents = {
alias: connections[alias]._test_serialized_contents
for alias in connections
if alias in self.serialized_aliases
}
class Shuffler:
"""
This class implements shuffling with a special consistency property.
Consistency means that, for a given seed and key function, if two sets of
items are shuffled, the resulting order will agree on the intersection of
the two sets. For example, if items are removed from an original set, the
shuffled order for the new set will be the shuffled order of the original
set restricted to the smaller set.
"""
# This doesn't need to be cryptographically strong, so use what's fastest.
hash_algorithm = "md5"
@classmethod
def _hash_text(cls, text):
h = new_hash(cls.hash_algorithm, usedforsecurity=False)
h.update(text.encode("utf-8"))
return h.hexdigest()
def __init__(self, seed=None):
if seed is None:
# Limit seeds to 10 digits for simpler output.
seed = random.randint(0, 10**10 - 1)
seed_source = "generated"
else:
seed_source = "given"
self.seed = seed
self.seed_source = seed_source
@property
def seed_display(self):
return f"{self.seed!r} ({self.seed_source})"
def _hash_item(self, item, key):
text = "{}{}".format(self.seed, key(item))
return self._hash_text(text)
def shuffle(self, items, key):
"""
Return a new list of the items in a shuffled order.
        The `key` is a function that accepts an item in `items` and returns
        a string that uniquely identifies it (a string id). The order of the
        return value is deterministic: it depends on the seed and the key
        function, but not on the original order of the items.
"""
hashes = {}
for item in items:
hashed = self._hash_item(item, key)
if hashed in hashes:
msg = "item {!r} has same hash {!r} as item {!r}".format(
item,
hashed,
hashes[hashed],
)
raise RuntimeError(msg)
hashes[hashed] = item
return [hashes[hashed] for hashed in sorted(hashes)]
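# Editorial sketch (not part of Django): the consistency property described in
# Shuffler's docstring. Shuffling a subset of the items yields the shuffled
# order of the full set restricted to that subset. The helper name and values
# are made up.
def _example_shuffler_consistency():
    shuffler = Shuffler(seed=4242)
    full = shuffler.shuffle(["a", "b", "c", "d"], key=lambda item: item)
    subset = shuffler.shuffle(["a", "c", "d"], key=lambda item: item)
    # Removing "b" from the full order gives exactly the subset order.
    assert [item for item in full if item != "b"] == subset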
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(
self,
pattern=None,
top_level=None,
verbosity=1,
interactive=True,
failfast=False,
keepdb=False,
reverse=False,
debug_mode=False,
debug_sql=False,
parallel=0,
tags=None,
exclude_tags=None,
test_name_patterns=None,
pdb=False,
buffer=False,
enable_faulthandler=True,
timing=False,
shuffle=False,
logger=None,
**kwargs,
):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
if not faulthandler.is_enabled() and enable_faulthandler:
try:
faulthandler.enable(file=sys.stderr.fileno())
except (AttributeError, io.UnsupportedOperation):
faulthandler.enable(file=sys.__stderr__.fileno())
self.pdb = pdb
if self.pdb and self.parallel > 1:
raise ValueError(
"You cannot use --pdb with parallel tests; pass --parallel=1 to use it."
)
self.buffer = buffer
self.test_name_patterns = None
self.time_keeper = TimeKeeper() if timing else NullTimeKeeper()
if test_name_patterns:
# unittest does not export the _convert_select_pattern function
# that converts command-line arguments to patterns.
self.test_name_patterns = {
pattern if "*" in pattern else "*%s*" % pattern
for pattern in test_name_patterns
}
self.shuffle = shuffle
self._shuffler = None
self.logger = logger
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
"-t",
"--top-level-directory",
dest="top_level",
help="Top level of project for unittest discovery.",
)
parser.add_argument(
"-p",
"--pattern",
default="test*.py",
help="The test matching pattern. Defaults to test*.py.",
)
parser.add_argument(
"--keepdb", action="store_true", help="Preserves the test DB between runs."
)
parser.add_argument(
"--shuffle",
nargs="?",
default=False,
type=int,
metavar="SEED",
help="Shuffles test case order.",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
help="Reverses test case order.",
)
parser.add_argument(
"--debug-mode",
action="store_true",
help="Sets settings.DEBUG to True.",
)
parser.add_argument(
"-d",
"--debug-sql",
action="store_true",
help="Prints logged SQL queries on failure.",
)
parser.add_argument(
"--parallel",
nargs="?",
const="auto",
default=0,
type=parallel_type,
metavar="N",
help=(
"Run tests using up to N parallel processes. Use the value "
'"auto" to run one test process for each processor core.'
),
)
parser.add_argument(
"--tag",
action="append",
dest="tags",
help="Run only tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--exclude-tag",
action="append",
dest="exclude_tags",
help="Do not run tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--pdb",
action="store_true",
help="Runs a debugger (pdb, or ipdb if installed) on error or failure.",
)
parser.add_argument(
"-b",
"--buffer",
action="store_true",
help="Discard output from passing tests.",
)
parser.add_argument(
"--no-faulthandler",
action="store_false",
dest="enable_faulthandler",
help="Disables the Python faulthandler module during tests.",
)
parser.add_argument(
"--timing",
action="store_true",
help=("Output timings, including database set up and total run time."),
)
parser.add_argument(
"-k",
action="append",
dest="test_name_patterns",
help=(
"Only run test methods and classes that match the pattern "
"or substring. Can be used multiple times. Same as "
"unittest -k option."
),
)
@property
def shuffle_seed(self):
if self._shuffler is None:
return None
return self._shuffler.seed
def log(self, msg, level=None):
"""
Log the message at the given logging level (the default is INFO).
If a logger isn't set, the message is instead printed to the console,
respecting the configured verbosity. A verbosity of 0 prints no output,
a verbosity of 1 prints INFO and above, and a verbosity of 2 or higher
prints all levels.
"""
if level is None:
level = logging.INFO
if self.logger is None:
if self.verbosity <= 0 or (self.verbosity == 1 and level < logging.INFO):
return
print(msg)
else:
self.logger.log(level, msg)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def setup_shuffler(self):
if self.shuffle is False:
return
shuffler = Shuffler(seed=self.shuffle)
self.log(f"Using shuffle seed: {shuffler.seed_display}")
self._shuffler = shuffler
@contextmanager
def load_with_patterns(self):
original_test_name_patterns = self.test_loader.testNamePatterns
self.test_loader.testNamePatterns = self.test_name_patterns
try:
yield
finally:
# Restore the original patterns.
self.test_loader.testNamePatterns = original_test_name_patterns
def load_tests_for_label(self, label, discover_kwargs):
label_as_path = os.path.abspath(label)
tests = None
# If a module, or "module.ClassName[.method_name]", just run those.
if not os.path.exists(label_as_path):
with self.load_with_patterns():
tests = self.test_loader.loadTestsFromName(label)
if tests.countTestCases():
return tests
# Try discovery if "label" is a package or directory.
is_importable, is_package = try_importing(label)
if is_importable:
if not is_package:
return tests
elif not os.path.isdir(label_as_path):
if os.path.exists(label_as_path):
assert tests is None
raise RuntimeError(
f"One of the test labels is a path to a file: {label!r}, "
f"which is not supported. Use a dotted module name or "
f"path to a directory instead."
)
return tests
kwargs = discover_kwargs.copy()
if os.path.isdir(label_as_path) and not self.top_level:
kwargs["top_level_dir"] = find_top_level(label_as_path)
with self.load_with_patterns():
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this run,
# to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
return tests
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
if extra_tests is not None:
warnings.warn(
"The extra_tests argument is deprecated.",
RemovedInDjango50Warning,
stacklevel=2,
)
test_labels = test_labels or ["."]
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs["pattern"] = self.pattern
if self.top_level is not None:
discover_kwargs["top_level_dir"] = self.top_level
self.setup_shuffler()
all_tests = []
for label in test_labels:
tests = self.load_tests_for_label(label, discover_kwargs)
all_tests.extend(iter_test_cases(tests))
all_tests.extend(iter_test_cases(extra_tests))
if self.tags or self.exclude_tags:
if self.tags:
self.log(
"Including test tag(s): %s." % ", ".join(sorted(self.tags)),
level=logging.DEBUG,
)
if self.exclude_tags:
self.log(
"Excluding test tag(s): %s." % ", ".join(sorted(self.exclude_tags)),
level=logging.DEBUG,
)
all_tests = filter_tests_by_tags(all_tests, self.tags, self.exclude_tags)
# Put the failures detected at load time first for quicker feedback.
# _FailedTest objects include things like test modules that couldn't be
# found or that couldn't be loaded due to syntax errors.
test_types = (unittest.loader._FailedTest, *self.reorder_by)
all_tests = list(
reorder_tests(
all_tests,
test_types,
shuffler=self._shuffler,
reverse=self.reverse,
)
)
self.log("Found %d test(s)." % len(all_tests))
suite = self.test_suite(all_tests)
if self.parallel > 1:
subsuites = partition_suite_by_case(suite)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
processes = min(self.parallel, len(subsuites))
# Update also "parallel" because it's used to determine the number
# of test databases.
self.parallel = processes
if processes > 1:
suite = self.parallel_test_suite(
subsuites,
processes,
self.failfast,
self.debug_mode,
self.buffer,
)
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity,
self.interactive,
time_keeper=self.time_keeper,
keepdb=self.keepdb,
debug_sql=self.debug_sql,
parallel=self.parallel,
**kwargs,
)
def get_resultclass(self):
if self.debug_sql:
return DebugSQLTextTestResult
elif self.pdb:
return PDBDebugResult
def get_test_runner_kwargs(self):
return {
"failfast": self.failfast,
"resultclass": self.get_resultclass(),
"verbosity": self.verbosity,
"buffer": self.buffer,
}
def run_checks(self, databases):
# Checks are run after database creation since some checks require
# database access.
call_command("check", verbosity=self.verbosity, databases=databases)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
try:
return runner.run(suite)
finally:
if self._shuffler is not None:
seed_display = self._shuffler.seed_display
self.log(f"Used shuffle seed: {seed_display}")
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return (
len(result.failures) + len(result.errors) + len(result.unexpectedSuccesses)
)
def _get_databases(self, suite):
databases = {}
for test in iter_test_cases(suite):
test_databases = getattr(test, "databases", None)
if test_databases == "__all__":
test_databases = connections
if test_databases:
serialized_rollback = getattr(test, "serialized_rollback", False)
databases.update(
(alias, serialized_rollback or databases.get(alias, False))
for alias in test_databases
)
return databases
def get_databases(self, suite):
databases = self._get_databases(suite)
unused_databases = [alias for alias in connections if alias not in databases]
if unused_databases:
self.log(
"Skipping setup of unused database(s): %s."
% ", ".join(sorted(unused_databases)),
level=logging.DEBUG,
)
return databases
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
Return the number of tests that failed.
"""
if extra_tests is not None:
warnings.warn(
"The extra_tests argument is deprecated.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
suite.serialized_aliases = set(
alias for alias, serialize in databases.items() if serialize
)
with self.time_keeper.timed("Total database setup"):
old_config = self.setup_databases(
aliases=databases,
serialized_aliases=suite.serialized_aliases,
)
run_failed = False
try:
self.run_checks(databases)
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
with self.time_keeper.timed("Total database teardown"):
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
# Silence teardown exceptions if an exception was raised during
# runs to avoid shadowing it.
if not run_failed:
raise
self.time_keeper.print_results()
return self.suite_result(suite, result)
def try_importing(label):
"""
Try importing a test label, and return (is_importable, is_package).
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
return (False, False)
return (True, hasattr(mod, "__path__"))
def find_top_level(top_level):
# Try to be a bit smarter than unittest about finding the default top-level
# for a given directory path, to avoid breaking relative imports.
# (Unittest's default is to set top-level equal to the path, which means
# relative imports will result in "Attempted relative import in
# non-package.").
# We'd be happy to skip this and require dotted module paths (which don't
# cause this problem) instead of file paths (which do), but in the case of
# a directory in the cwd, which would be equally valid if considered as a
# top-level module or as a directory path, unittest unfortunately prefers
# the latter.
while True:
init_py = os.path.join(top_level, "__init__.py")
if not os.path.exists(init_py):
break
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
return top_level
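# Editorial sketch (not part of Django): find_top_level() keeps walking up
# while the directory contains an __init__.py. With a hypothetical layout
# base/pkg/sub where pkg and sub are packages but base is not, base is the
# top level. The helper name is made up.
def _example_find_top_level(base_dir):
    sub = os.path.join(base_dir, "pkg", "sub")
    os.makedirs(sub)
    for package_dir in (os.path.join(base_dir, "pkg"), sub):
        open(os.path.join(package_dir, "__init__.py"), "w").close()
    # base_dir has no __init__.py, so it is returned as the top level.
    assert find_top_level(sub) == base_dir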
def _class_shuffle_key(cls):
return f"{cls.__module__}.{cls.__qualname__}"
def shuffle_tests(tests, shuffler):
"""
Return an iterator over the given tests in a shuffled order, keeping tests
next to other tests of their class.
`tests` should be an iterable of tests.
"""
tests_by_type = {}
for _, class_tests in itertools.groupby(tests, type):
class_tests = list(class_tests)
test_type = type(class_tests[0])
class_tests = shuffler.shuffle(class_tests, key=lambda test: test.id())
tests_by_type[test_type] = class_tests
classes = shuffler.shuffle(tests_by_type, key=_class_shuffle_key)
return itertools.chain(*(tests_by_type[cls] for cls in classes))
def reorder_test_bin(tests, shuffler=None, reverse=False):
"""
Return an iterator that reorders the given tests, keeping tests next to
other tests of their class.
`tests` should be an iterable of tests that supports reversed().
"""
if shuffler is None:
if reverse:
return reversed(tests)
# The function must return an iterator.
return iter(tests)
tests = shuffle_tests(tests, shuffler)
if not reverse:
return tests
# Arguments to reversed() must be reversible.
return reversed(list(tests))
def reorder_tests(tests, classes, reverse=False, shuffler=None):
"""
Reorder an iterable of tests, grouping by the given TestCase classes.
This function also removes any duplicates and reorders so that tests of the
same type are consecutive.
The result is returned as an iterator. `classes` is a sequence of types.
Tests that are instances of `classes[0]` are grouped first, followed by
instances of `classes[1]`, etc. Tests that are not instances of any of the
classes are grouped last.
If `reverse` is True, the tests within each `classes` group are reversed,
but without reversing the order of `classes` itself.
The `shuffler` argument is an optional instance of this module's `Shuffler`
class. If provided, tests will be shuffled within each `classes` group, but
keeping tests with other tests of their TestCase class. Reversing is
applied after shuffling to allow reversing the same random order.
"""
# Each bin maps TestCase class to OrderedSet of tests. This permits tests
# to be grouped by TestCase class even if provided non-consecutively.
bins = [defaultdict(OrderedSet) for i in range(len(classes) + 1)]
*class_bins, last_bin = bins
for test in tests:
for test_bin, test_class in zip(class_bins, classes):
if isinstance(test, test_class):
break
else:
test_bin = last_bin
test_bin[type(test)].add(test)
for test_bin in bins:
# Call list() since reorder_test_bin()'s input must support reversed().
tests = list(itertools.chain.from_iterable(test_bin.values()))
yield from reorder_test_bin(tests, shuffler=shuffler, reverse=reverse)
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
suite_class = type(suite)
all_tests = iter_test_cases(suite)
return [suite_class(tests) for _, tests in itertools.groupby(all_tests, type)]
def test_match_tags(test, tags, exclude_tags):
if isinstance(test, unittest.loader._FailedTest):
        # Tests that couldn't load always match, to prevent tests from
        # falsely passing due, e.g., to syntax errors.
return True
test_tags = set(getattr(test, "tags", []))
test_fn_name = getattr(test, "_testMethodName", str(test))
if hasattr(test, test_fn_name):
test_fn = getattr(test, test_fn_name)
test_fn_tags = list(getattr(test_fn, "tags", []))
test_tags = test_tags.union(test_fn_tags)
if tags and test_tags.isdisjoint(tags):
return False
return test_tags.isdisjoint(exclude_tags)
def filter_tests_by_tags(tests, tags, exclude_tags):
"""Return the matching tests as an iterator."""
return (test for test in tests if test_match_tags(test, tags, exclude_tags))
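# Illustrative sketch (not part of Django): tagging a test method by hand
# (as the @tag decorator does) and filtering on the tag.
def _filter_tests_by_tags_demo():
    class SampleTests(unittest.TestCase):
        def test_fast(self):
            pass

        test_fast.tags = {"fast"}

        def test_slow(self):
            pass

        test_slow.tags = {"slow"}

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(SampleTests)
    kept = list(filter_tests_by_tags(suite, {"fast"}, set()))
    assert [t._testMethodName for t in kept] == ["test_fast"]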
|
e3c59c7ef483901b1867e0469364ed1d3c98e96ff9c7a61ecd45798bda692884 | import functools
import re
import sys
import types
import warnings
from pathlib import Path
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={"i18n": "django.templatetags.i18n"},
)
def builtin_template_path(name):
"""
Return a path to a builtin template.
Avoid calling this function at the module level or in a class-definition
because __file__ may not exist, e.g. in frozen environments.
"""
return Path(__file__).parent / "templates" / name
class ExceptionCycleWarning(UserWarning):
pass
class CallableSettingWrapper:
"""
    Object to wrap a callable appearing in settings.
    * Not to be called in the debug page (#21345).
    * Must not break the debug page if the callable forbids setting
      attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
if request.accepts("text/html"):
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code)
else:
text = reporter.get_traceback_text()
return HttpResponse(
text, status=status_code, content_type="text/plain; charset=utf-8"
)
@functools.lru_cache
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, "exception_reporter_filter", default_filter)
def get_exception_reporter_class(request):
default_exception_reporter_class = import_string(
settings.DEFAULT_EXCEPTION_REPORTER
)
return getattr(
request, "exception_reporter_class", default_exception_reporter_class
)
def get_caller(request):
resolver_match = request.resolver_match
if resolver_match is None:
try:
resolver_match = resolve(request.path)
except Http404:
pass
return "" if resolver_match is None else resolver_match._func_path
class SafeExceptionReporterFilter:
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
cleansed_substitute = "********************"
hidden_settings = _lazy_re_compile(
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE|HTTP_COOKIE", flags=re.I
)
def cleanse_setting(self, key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse the keys in that dictionary.
"""
if key == settings.SESSION_COOKIE_NAME:
is_sensitive = True
else:
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
def get_safe_settings(self):
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))
return settings_dict
def get_safe_request_meta(self, request):
"""
Return a dictionary of request.META with sensitive values redacted.
"""
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
def get_safe_cookies(self, request):
"""
Return a dictionary of request.COOKIES with sensitive values redacted.
"""
if not hasattr(request, "COOKIES"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.COOKIES.items()}
def is_active(self, request):
"""
        This filter adds safety in production environments (i.e. when DEBUG
        is False). If DEBUG is True, the site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replace the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, "sensitive_post_parameters", [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = self.cleansed_substitute
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(
request, "sensitive_post_parameters", []
)
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == "__ALL__":
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = self.cleansed_substitute
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = self.cleansed_substitute
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. Calling isinstance() also forces
            # evaluation of lazy objects, so lazy MultiValueDicts are detected.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return "{!r} while evaluating {!r}".format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (
current_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in current_frame.f_locals
):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals["sensitive_variables_wrapper"]
sensitive_variables = getattr(wrapper, "sensitive_variables", None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == "__ALL__":
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = self.cleansed_substitute
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = self.cleansed_substitute
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (
tb_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in tb_frame.f_locals
):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed["func_args"] = self.cleansed_substitute
cleansed["func_kwargs"] = self.cleansed_substitute
return cleansed.items()
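# Illustrative sketch (not part of Django): cleanse_setting() redacts by key
# name and recurses into containers. Assumes Django settings are configured,
# since the method consults settings.SESSION_COOKIE_NAME.
def _cleanse_setting_demo():
    reporter_filter = SafeExceptionReporterFilter()
    cleansed = reporter_filter.cleanse_setting(
        "DATABASES", {"default": {"PASSWORD": "hunter2", "HOST": "db"}}
    )
    assert cleansed["default"]["PASSWORD"] == reporter_filter.cleansed_substitute
    assert cleansed["default"]["HOST"] == "db"  # non-sensitive keys pass through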
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
@property
def html_template_path(self):
return builtin_template_path("technical_500.html")
@property
def text_template_path(self):
return builtin_template_path("technical_500.txt")
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, "template_debug", None)
self.template_does_not_exist = False
self.postmortem = None
def _get_raw_insecure_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return "{scheme}://{host}{path}".format(
scheme=self.request.scheme,
host=self.request._get_raw_host(),
path=self.request.get_full_path(),
)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = "%s… <trimmed %d bytes string>" % (v[0:4096], len(v))
frame_vars.append((k, v))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, "start", None)
end = getattr(self.exc_value, "end", None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_str(
unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))],
"ascii",
errors="replace",
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = "[unable to retrieve the current user]"
c = {
"is_email": self.is_email,
"unicode_hint": unicode_hint,
"frames": frames,
"request": self.request,
"request_meta": self.filter.get_safe_request_meta(self.request),
"request_COOKIES_items": self.filter.get_safe_cookies(self.request).items(),
"user_str": user_str,
"filtered_POST_items": list(
self.filter.get_post_parameters(self.request).items()
),
"settings": self.filter.get_safe_settings(),
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": timezone.now(),
"django_version_info": get_version(),
"sys_path": sys.path,
"template_info": self.template_info,
"template_does_not_exist": self.template_does_not_exist,
"postmortem": self.postmortem,
}
if self.request is not None:
c["request_GET_items"] = self.request.GET.items()
c["request_FILES_items"] = self.request.FILES.items()
c["request_insecure_uri"] = self._get_raw_insecure_uri()
c["raising_view_name"] = get_caller(self.request)
# Check whether exception info is available
if self.exc_type:
c["exception_type"] = self.exc_type.__name__
if self.exc_value:
c["exception_value"] = str(self.exc_value)
if frames:
c["lastframe"] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with self.html_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with self.text_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_source(self, filename, loader, module_name):
source = None
if hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, "rb") as fp:
source = fp.read().splitlines()
except OSError:
pass
return source
def _get_lines_from_file(
self, filename, lineno, context_lines, loader=None, module_name=None
):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = self._get_source(filename, loader, module_name)
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (https://www.python.org/dev/peps/pep-0263/)
match = re.search(rb"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match[1].decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
try:
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
except IndexError:
return None, [], None, []
return lower_bound, pre_context, context_line, post_context
def _get_explicit_or_implicit_cause(self, exc_value):
explicit = getattr(exc_value, "__cause__", None)
suppress_context = getattr(exc_value, "__suppress_context__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or (None if suppress_context else implicit)
def get_traceback_frames(self):
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = self._get_explicit_or_implicit_cause(exc_value)
if exc_value in exceptions:
warnings.warn(
"Cycle in the exception chain detected: exception '%s' "
"encountered again." % exc_value,
ExceptionCycleWarning,
)
# Avoid infinite loop if there's a cyclic reference (#29393).
break
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while True:
frames.extend(self.get_exception_traceback_frames(exc_value, tb))
try:
exc_value = exceptions.pop()
except IndexError:
break
tb = exc_value.__traceback__
return frames
def get_exception_traceback_frames(self, exc_value, tb):
exc_cause = self._get_explicit_or_implicit_cause(exc_value)
exc_cause_explicit = getattr(exc_value, "__cause__", True)
if tb is None:
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": None,
"type": "user",
}
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
(
pre_context_lineno,
pre_context,
context_line,
post_context,
) = self._get_lines_from_file(
filename,
lineno,
7,
loader,
module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": self.filter.get_traceback_frame_variables(
self.request, tb.tb_frame
),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
tb = tb.tb_next
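# Illustrative sketch (not part of Django): _get_explicit_or_implicit_cause()
# prefers __cause__ (set by "raise ... from ...") over the implicit
# __context__. Assumes Django settings are configured so the default
# exception reporter filter can be imported.
def _exception_cause_demo():
    try:
        try:
            raise KeyError("inner")
        except KeyError as inner:
            raise ValueError("outer") from inner
    except ValueError as outer:
        reporter = ExceptionReporter(None, type(outer), outer, outer.__traceback__)
        assert isinstance(reporter._get_explicit_or_implicit_cause(outer), KeyError)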
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]["path"]
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]["tried"]
except (IndexError, TypeError, KeyError):
resolved = True
tried = request.resolver_match.tried if request.resolver_match else None
else:
resolved = False
if not tried or ( # empty URLconf
request.path == "/"
and len(tried) == 1
and len(tried[0]) == 1 # default URLconf
and getattr(tried[0][0], "app_name", "")
== getattr(tried[0][0], "namespace", "")
== "admin"
):
return default_urlconf(request)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
with builtin_template_path("technical_404.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
reporter_filter = get_default_exception_reporter_filter()
c = Context(
{
"urlconf": urlconf,
"root_urlconf": settings.ROOT_URLCONF,
"request_path": error_url,
"urlpatterns": tried,
"resolved": resolved,
"reason": str(exception),
"request": request,
"settings": reporter_filter.get_safe_settings(),
"raising_view_name": get_caller(request),
}
)
return HttpResponseNotFound(t.render(c))
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with builtin_template_path("default_urlconf.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(
{
"version": get_docs_version(),
}
)
return HttpResponse(t.render(c))
|
ca9bcb0c9f3591e46c3683a4031c6ca5703ec446ed42506bb7d9fe8405508100 | """Default variable filters."""
import random as random_module
import re
import types
import warnings
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from inspect import unwrap
from operator import itemgetter
from pprint import pformat
from urllib.parse import quote
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango51Warning
from django.utils.encoding import iri_to_uri
from django.utils.html import avoid_wrapping, conditional_escape, escape, escapejs
from django.utils.html import json_script as _json_script
from django.utils.html import linebreaks, strip_tags
from django.utils.html import urlize as _urlize
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import Truncator, normalize_newlines, phone2numeric
from django.utils.text import slugify as _slugify
from django.utils.text import wrap
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import gettext, ngettext
from .base import VARIABLE_ATTRIBUTE_SEPARATOR
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive strings. The object
passed as the first positional argument will be converted to a string.
"""
@wraps(func)
def _dec(first, *args, **kwargs):
first = str(first)
result = func(first, *args, **kwargs)
if isinstance(first, SafeData) and getattr(unwrap(func), "is_safe", False):
result = mark_safe(result)
return result
return _dec
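# Illustrative sketch (not part of Django): a custom filter written with
# stringfilter can assume its first argument is already a string, so
# _reverse_demo_filter(123) returns "321".
@stringfilter
def _reverse_demo_filter(value):
    return value[::-1]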
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Add slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalize the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encode characters for use in JavaScript strings."""
return escapejs(value)
@register.filter(is_safe=True)
def json_script(value, element_id=None):
"""
Output value JSON-encoded, wrapped in a <script type="application/json">
tag (with an optional id).
"""
return _json_script(value, element_id)
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Display a float to a specified number of decimal places.
If called without an argument, display the floating point number with one
decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, always display exactly arg number of decimal places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, display arg number of decimal places -- but only if
there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If arg has the 'g' suffix, force the result to be grouped by the
THOUSAND_SEPARATOR for the active locale. When the active locale is
en (English):
* {{ 6666.6666|floatformat:"2g" }} displays "6,666.67"
* {{ 10000|floatformat:"g" }} displays "10,000"
If arg has the 'u' suffix, force the result to be unlocalized. When the
active locale is pl (Polish):
* {{ 66666.6666|floatformat:"2" }} displays "66666,67"
* {{ 66666.6666|floatformat:"2u" }} displays "66666.67"
If the input float is infinity or NaN, display the string representation
of that value.
"""
force_grouping = False
use_l10n = True
if isinstance(arg, str):
last_char = arg[-1]
if arg[-2:] in {"gu", "ug"}:
force_grouping = True
use_l10n = False
arg = arg[:-2] or -1
elif last_char == "g":
force_grouping = True
arg = arg[:-1] or -1
elif last_char == "u":
use_l10n = False
arg = arg[:-1] or -1
try:
input_val = str(text)
d = Decimal(input_val)
except InvalidOperation:
try:
d = Decimal(str(float(text)))
except (ValueError, InvalidOperation, TypeError):
return ""
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(
formats.number_format(
"%d" % (int(d)),
0,
use_l10n=use_l10n,
force_grouping=force_grouping,
)
)
exp = Decimal(1).scaleb(-abs(p))
# Set the precision high enough to avoid an exception (#15789).
tupl = d.as_tuple()
units = len(tupl[1])
units += -tupl[2] if m else tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`,
# and `exponent` from Decimal.as_tuple() directly.
rounded_d = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec))
sign, digits, exponent = rounded_d.as_tuple()
digits = [str(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append("0")
digits.insert(-exponent, ".")
if sign and rounded_d:
digits.append("-")
number = "".join(reversed(digits))
return mark_safe(
formats.number_format(
number,
abs(p),
use_l10n=use_l10n,
force_grouping=force_grouping,
)
)
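# Illustrative sketch (not part of Django): a few floatformat() results,
# assuming the default en locale formatting settings.
def _floatformat_demo():
    assert floatformat(34.23234) == "34.2"  # default arg of -1
    assert floatformat(34.00000) == "34"  # negative arg drops ".0"
    assert floatformat(34.26, 3) == "34.260"  # positive arg pads with zeros
    assert floatformat(66666.666, "2u") == "66666.67"  # 'u' forces unlocalized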
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escape an IRI value for use in a URL."""
return iri_to_uri(value)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Display text with line numbers."""
lines = value.split("\n")
# Find the maximum width of the line count, for use with zero padding
# string format command
width = str(len(str(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe("\n".join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Convert a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Return the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't
alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip
leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Format the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception
that the leading "%" is dropped.
See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting
for documentation of Python string formatting.
"""
if isinstance(value, tuple):
value = str(value)
try:
return ("%" + str(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Convert a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m[0].lower(), value.title())
return re.sub(r"\d([A-Z])", lambda m: m[0].lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""Truncate a string after `arg` number of characters."""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncate HTML after `arg` number of chars.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncate a string after `arg` number of words.
Remove newlines within the string.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=" …")
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncate HTML after `arg` number of words.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=" …")
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Convert a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escape a value for use in a URL.
The ``safe`` parameter determines the characters which should not be
escaped by Python's quote() function. If not provided, use the default safe
characters (but an empty string can be provided when *all* characters
should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs["safe"] = safe
return quote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Convert URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Convert URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(
_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape)
)
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Return the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""Wrap words at `arg` line length."""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""Left-align the value in a field of a given width."""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""Right-align the value in a field of a given width."""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Center the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""Remove all values of arg from the given string."""
safe = isinstance(value, SafeData)
value = value.replace(arg, "")
if safe and arg != ";":
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""Mark the value as a string that should be auto-escaped."""
return conditional_escape(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escape a string's HTML. Return a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replace line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br>``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Convert all newlines in a piece of plain text to HTML line breaks
(``<br>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace("\n", "<br>"))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""Mark the value as a string that should not be auto-escaped."""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Mark each element in the sequence,
individually, as safe, after converting them to strings. Return a list
with the results.
"""
return [mark_safe(obj) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strip all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
def _property_resolver(arg):
"""
    When arg is convertible to float, behave like operator.itemgetter(arg).
Otherwise, chain __getitem__() and getattr().
>>> _property_resolver(1)('abc')
'b'
>>> _property_resolver('1')('abc')
Traceback (most recent call last):
...
TypeError: string indices must be integers
>>> class Foo:
... a = 42
... b = 3.14
... c = 'Hey!'
>>> _property_resolver('b')(Foo())
3.14
"""
try:
float(arg)
except ValueError:
if VARIABLE_ATTRIBUTE_SEPARATOR + "_" in arg or arg[0] == "_":
raise AttributeError("Access to private variables is forbidden.")
parts = arg.split(VARIABLE_ATTRIBUTE_SEPARATOR)
def resolve(value):
for part in parts:
try:
value = value[part]
except (AttributeError, IndexError, KeyError, TypeError, ValueError):
value = getattr(value, part)
return value
return resolve
else:
return itemgetter(arg)
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Given a list of dicts, return that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=_property_resolver(arg))
except (AttributeError, TypeError):
return ""
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Given a list of dicts, return that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=_property_resolver(arg), reverse=True)
except (AttributeError, TypeError):
return ""
@register.filter(is_safe=False)
def first(value):
"""Return the first item in a list."""
try:
return value[0]
except IndexError:
return ""
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""Join a list with a string, like Python's ``str.join(list)``."""
try:
if autoescape:
value = [conditional_escape(v) for v in value]
data = conditional_escape(arg).join(value)
except TypeError: # Fail silently if arg isn't iterable.
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"""Return the last item in a list."""
try:
return value[-1]
except IndexError:
return ""
@register.filter(is_safe=False)
def length(value):
"""Return the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Return a boolean of whether the value's length is the argument."""
warnings.warn(
"The length_is template filter is deprecated in favor of the length template "
"filter and the == operator within an {% if %} tag.",
RemovedInDjango51Warning,
)
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
def random(value):
"""Return a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Return a slice of the list using the same syntax as Python's list slicing.
"""
try:
bits = []
for x in str(arg).split(":"):
if not x:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively take a self-nested list and return an HTML unordered list --
WITHOUT opening and closing <ul> tags.
Assume the list is in the proper format. For example, if ``var`` contains:
``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then
``{{ var|unordered_list }}`` returns::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
def escaper(x):
return x
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if isinstance(next_item, (list, tuple, types.GeneratorType)):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = "\t" * tabs
output = []
for item, children in walk_items(item_list):
sublist = ""
if children:
sublist = "\n%s<ul>\n%s\n%s</ul>\n%s" % (
indent,
list_formatter(children, tabs + 1),
indent,
indent,
)
output.append("%s<li>%s%s</li>" % (indent, escaper(item), sublist))
return "\n".join(output)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Add the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ""
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, return the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Return the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Format a date according to the given format."""
if value in (None, ""):
return ""
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ""
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Format a time according to the given format."""
if value in (None, ""):
return ""
try:
return formats.time_format(value, arg)
except (AttributeError, TypeError):
try:
return time_format(value, arg)
except (AttributeError, TypeError):
return ""
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Format a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ""
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ""
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Format a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ""
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ""
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Return True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false, and (optionally) None,
return one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
    ``None``   ``"yeah,no"``          ``"no"`` (converts None to False
                                      if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
# Translators: Please do not add spaces around commas.
arg = gettext("yes,no,maybe")
bits = arg.split(",")
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes_ = int(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {"size": 0}
return avoid_wrapping(value)
def filesize_number_format(value):
return formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {"size": bytes_}
elif bytes_ < MB:
value = gettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = gettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = gettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = gettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = gettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
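# Illustrative sketch (not part of Django): filesizeformat() picks a binary
# unit and keeps one decimal; avoid_wrapping() joins the parts with a
# non-breaking space. Assumes default en number formatting settings.
def _filesizeformat_demo():
    assert filesizeformat(1023) == "1023\xa0bytes"
    assert filesizeformat(1024) == "1.0\xa0KB"
    assert filesizeformat(1024 * 1024 * 4.2) == "4.2\xa0MB"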
@register.filter(is_safe=False)
def pluralize(value, arg="s"):
"""
Return a plural suffix if the value is not 1, '1', or an object of
length 1. By default, use 's' as the suffix:
    * If value is 0, vote{{ value|pluralize }} displays "votes".
    * If value is 1, vote{{ value|pluralize }} displays "vote".
    * If value is 2, vote{{ value|pluralize }} displays "votes".
    If an argument is provided, use that string instead:
    * If value is 0, class{{ value|pluralize:"es" }} displays "classes".
    * If value is 1, class{{ value|pluralize:"es" }} displays "class".
    * If value is 2, class{{ value|pluralize:"es" }} displays "classes".
    If the provided argument contains a comma, use the text before the comma
    for the singular case and the text after the comma for the plural case:
    * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "candies".
    * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "candy".
    * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "candies".
"""
if "," not in arg:
arg = "," + arg
bits = arg.split(",")
if len(bits) > 2:
return ""
singular_suffix, plural_suffix = bits[:2]
try:
return singular_suffix if float(value) == 1 else plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
return singular_suffix if len(value) == 1 else plural_suffix
except TypeError: # len() of unsized object.
pass
return ""
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Take a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
|
c7e91b1eef82a47ed3d6ef32349c010a66b5d44fda4271c1170213af2a6937d8 | """
Field classes.
"""
import copy
import datetime
import json
import math
import operator
import os
import re
import uuid
from decimal import Decimal, DecimalException
from io import BytesIO
from urllib.parse import urlsplit, urlunsplit
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION,
CheckboxInput,
ClearableFileInput,
DateInput,
DateTimeInput,
EmailInput,
FileInput,
HiddenInput,
MultipleHiddenInput,
NullBooleanSelect,
NumberInput,
Select,
SelectMultiple,
SplitDateTimeWidget,
SplitHiddenDateTimeWidget,
Textarea,
TextInput,
TimeInput,
URLInput,
)
from django.utils import formats
from django.utils.dateparse import parse_datetime, parse_duration
from django.utils.duration import duration_string
from django.utils.ipv6 import clean_ipv6_address
from django.utils.regex_helper import _lazy_re_compile
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
__all__ = (
"Field",
"CharField",
"IntegerField",
"DateField",
"TimeField",
"DateTimeField",
"DurationField",
"RegexField",
"EmailField",
"FileField",
"ImageField",
"URLField",
"BooleanField",
"NullBooleanField",
"ChoiceField",
"MultipleChoiceField",
"ComboField",
"MultiValueField",
"FloatField",
"DecimalField",
"SplitDateTimeField",
"GenericIPAddressField",
"FilePathField",
"JSONField",
"SlugField",
"TypedChoiceField",
"TypedMultipleChoiceField",
"UUIDField",
)
class Field:
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = (
HiddenInput # Default widget to use when rendering this as "hidden".
)
default_validators = [] # Default set of validators
    # Add an 'invalid' entry to default_error_messages if you want a specific
    # field error message not raised by the field validators.
default_error_messages = {
"required": _("This field is required."),
}
empty_values = list(validators.EMPTY_VALUES)
def __init__(
self,
*,
required=True,
widget=None,
label=None,
initial=None,
help_text="",
error_messages=None,
show_hidden_initial=False,
validators=(),
localize=False,
disabled=False,
label_suffix=None,
):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies whether to render a
        #                  hidden widget with the initial value after the
        #                  visible widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
else:
widget = copy.deepcopy(widget)
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, "default_error_messages", {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = [*self.default_validators, *validators]
super().__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages["required"], code="required")
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, "code") and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validate the given value and return its "cleaned" value as an
appropriate Python object. Raise ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
if self.disabled:
return initial
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), return a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""Return True if data differs from initial."""
# Always return False if the field is disabled since self.bound_data
# always uses the initial value in this case.
if self.disabled:
return False
try:
data = self.to_python(data)
if hasattr(self, "_coerce"):
return self._coerce(data) != self._coerce(initial)
except ValidationError:
return True
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it with ''.
initial_value = initial if initial is not None else ""
data_value = data if data is not None else ""
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.error_messages = self.error_messages.copy()
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(
self, *, max_length=None, min_length=None, strip=True, empty_value="", **kwargs
):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
self.empty_value = empty_value
super().__init__(**kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
self.validators.append(validators.ProhibitNullCharactersValidator())
def to_python(self, value):
"""Return a string."""
if value not in self.empty_values:
value = str(value)
if self.strip:
value = value.strip()
if value in self.empty_values:
return self.empty_value
return value
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if self.max_length is not None and not widget.is_hidden:
# The HTML attribute is maxlength, not max_length.
attrs["maxlength"] = str(self.max_length)
if self.min_length is not None and not widget.is_hidden:
# The HTML attribute is minlength, not min_length.
attrs["minlength"] = str(self.min_length)
return attrs
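# Illustrative sketch (not part of Django): the clean() pipeline
# (to_python -> validate -> run_validators) as exercised through CharField.
def _charfield_clean_demo():
    field = CharField(max_length=4, required=True)
    assert field.clean("  ok ") == "ok"  # to_python() strips by default
    for bad in ("", "too long"):  # required check, then MaxLengthValidator
        try:
            field.clean(bad)
        except ValidationError:
            pass
        else:
            raise AssertionError("expected ValidationError for %r" % bad)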
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
"invalid": _("Enter a whole number."),
}
re_decimal = _lazy_re_compile(r"\.0*\s*$")
def __init__(self, *, max_value=None, min_value=None, step_size=None, **kwargs):
self.max_value, self.min_value, self.step_size = max_value, min_value, step_size
if kwargs.get("localize") and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault("widget", super().widget)
super().__init__(**kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
if step_size is not None:
self.validators.append(validators.StepValueValidator(step_size))
def to_python(self, value):
"""
Validate that int() can be called on the input. Return the result
of int() or None for empty values.
"""
value = super().to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub("", str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs["min"] = self.min_value
if self.max_value is not None:
attrs["max"] = self.max_value
if self.step_size is not None:
attrs["step"] = self.step_size
return attrs
class FloatField(IntegerField):
default_error_messages = {
"invalid": _("Enter a number."),
}
def to_python(self, value):
"""
Validate that float() can be called on the input. Return the result
of float() or None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
def validate(self, value):
super().validate(value)
if value in self.empty_values:
return
if not math.isfinite(value):
raise ValidationError(self.error_messages["invalid"], code="invalid")
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, NumberInput) and "step" not in widget.attrs:
if self.step_size is not None:
step = str(self.step_size)
else:
step = "any"
attrs.setdefault("step", step)
return attrs
class DecimalField(IntegerField):
default_error_messages = {
"invalid": _("Enter a number."),
}
def __init__(
self,
*,
max_value=None,
min_value=None,
max_digits=None,
decimal_places=None,
**kwargs,
):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(max_value=max_value, min_value=min_value, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validate that the input is a decimal number. Return a Decimal
instance or None for empty values. Ensure that there are no more
than max_digits in the number and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = Decimal(str(value))
except DecimalException:
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
def validate(self, value):
super().validate(value)
if value in self.empty_values:
return
if not value.is_finite():
raise ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, NumberInput) and "step" not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal(1).scaleb(-self.decimal_places)).lower()
else:
step = "any"
attrs.setdefault("step", step)
return attrs
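# Illustrative sketch (not part of the module itself): the "step" widget
# attribute above is derived from Decimal(1).scaleb(-decimal_places):
#
#     from decimal import Decimal
#
#     str(Decimal(1).scaleb(-2))          # -> '0.01'
#     str(Decimal(1).scaleb(-7)).lower()  # -> '1e-7' (see #20765)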
class BaseTemporalField(Field):
def __init__(self, *, input_formats=None, **kwargs):
super().__init__(**kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
value = value.strip()
# Try to strptime against each input format.
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages["invalid"], code="invalid")
def strptime(self, value, format):
raise NotImplementedError("Subclasses must define this method.")
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy("DATE_INPUT_FORMATS")
default_error_messages = {
"invalid": _("Enter a valid date."),
}
def to_python(self, value):
"""
Validate that the input can be converted to a date. Return a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super().to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(value, format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy("TIME_INPUT_FORMATS")
default_error_messages = {"invalid": _("Enter a valid time.")}
def to_python(self, value):
"""
Validate that the input can be converted to a time. Return a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super().to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(value, format).time()
class DateTimeFormatsIterator:
def __iter__(self):
yield from formats.get_format("DATETIME_INPUT_FORMATS")
yield from formats.get_format("DATE_INPUT_FORMATS")
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = DateTimeFormatsIterator()
default_error_messages = {
"invalid": _("Enter a valid date/time."),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validate that the input can be converted to a datetime. Return a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
try:
result = parse_datetime(value.strip())
except ValueError:
raise ValidationError(self.error_messages["invalid"], code="invalid")
if not result:
result = super().to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(value, format)
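# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): to_python() tries parse_datetime() (ISO
# 8601) first, then falls back to strptime() against the combined
# DATETIME_INPUT_FORMATS and DATE_INPUT_FORMATS iterator:
#
#     from django import forms
#
#     field = forms.DateTimeField()
#     field.clean("2023-04-01T13:30:00")  # ISO input, via parse_datetime()
#     field.clean("2023-04-01")           # date-only, via DATE_INPUT_FORMATS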
class DurationField(Field):
default_error_messages = {
"invalid": _("Enter a valid duration."),
"overflow": _("The number of days must be between {min_days} and {max_days}."),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
try:
value = parse_duration(str(value))
except OverflowError:
raise ValidationError(
self.error_messages["overflow"].format(
min_days=datetime.timedelta.min.days,
max_days=datetime.timedelta.max.days,
),
code="overflow",
)
if value is None:
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
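# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): parse_duration() accepts Django's duration
# string format as well as ISO 8601:
#
#     from django import forms
#
#     field = forms.DurationField()
#     field.clean("3 1:02:03")  # -> timedelta(days=3, seconds=3723)
#     field.clean("P3DT01H")    # ISO 8601 -> timedelta(days=3, hours=1)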
class RegexField(CharField):
def __init__(self, regex, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
"""
kwargs.setdefault("strip", False)
super().__init__(**kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, str):
regex = re.compile(regex)
self._regex = regex
if (
hasattr(self, "_regex_validator")
and self._regex_validator in self.validators
):
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def __init__(self, **kwargs):
super().__init__(strip=True, **kwargs)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
"invalid": _("No file was submitted. Check the encoding type on the form."),
"missing": _("No file was submitted."),
"empty": _("The submitted file is empty."),
"max_length": ngettext_lazy(
"Ensure this filename has at most %(max)d character (it has %(length)d).",
"Ensure this filename has at most %(max)d characters (it has %(length)d).",
"max",
),
"contradiction": _(
"Please either submit a file or check the clear checkbox, not both."
),
}
def __init__(self, *, max_length=None, allow_empty_file=False, **kwargs):
self.max_length = max_length
self.allow_empty_file = allow_empty_file
super().__init__(**kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages["invalid"], code="invalid")
if self.max_length is not None and len(file_name) > self.max_length:
params = {"max": self.max_length, "length": len(file_name)}
raise ValidationError(
self.error_messages["max_length"], code="max_length", params=params
)
if not file_name:
raise ValidationError(self.error_messages["invalid"], code="invalid")
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages["empty"], code="empty")
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(
self.error_messages["contradiction"], code="contradiction"
)
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
            # in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super().clean(data)
def bound_data(self, _, initial):
return initial
def has_changed(self, initial, data):
return not self.disabled and data is not None
class ImageField(FileField):
default_validators = [validators.validate_image_file_extension]
default_error_messages = {
"invalid_image": _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Check that the file-upload field data contains a valid image (GIF, JPG,
PNG, etc. -- whatever Pillow supports).
"""
f = super().to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, "temporary_file_path"):
file = data.temporary_file_path()
else:
if hasattr(data, "read"):
file = BytesIO(data.read())
else:
file = BytesIO(data["content"])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception as exc:
# Pillow doesn't recognize it as an image.
raise ValidationError(
self.error_messages["invalid_image"],
code="invalid_image",
) from exc
if hasattr(f, "seek") and callable(f.seek):
f.seek(0)
return f
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, FileInput) and "accept" not in widget.attrs:
attrs.setdefault("accept", "image/*")
return attrs
class URLField(CharField):
widget = URLInput
default_error_messages = {
"invalid": _("Enter a valid URL."),
}
default_validators = [validators.URLValidator()]
def __init__(self, **kwargs):
super().__init__(strip=True, **kwargs)
def to_python(self, value):
def split_url(url):
"""
            Return a list of url parts via urllib.parse.urlsplit(), or raise
            ValidationError for some malformed URLs.
"""
try:
return list(urlsplit(url))
except ValueError:
                # urllib.parse.urlsplit() can raise a ValueError with some
                # malformed URLs.
raise ValidationError(self.error_messages["invalid"], code="invalid")
value = super().to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = "http"
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ""
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
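# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): scheme-less input is normalized by assuming
# http:// and treating the bare input as the domain:
#
#     from django import forms
#
#     field = forms.URLField()
#     field.clean("example.com/page")  # -> 'http://example.com/page'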
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Return a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, str) and value.lower() in ("false", "0"):
value = False
else:
value = bool(value)
return super().to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages["required"], code="required")
def has_changed(self, initial, data):
if self.disabled:
return False
# Sometimes data or initial may be a string equivalent of a boolean
# so we should run it through to_python first to get a boolean value
return self.to_python(initial) != self.to_python(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True, and False. Clean invalid values
to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
        Explicitly check for the strings 'True' and 'False', which is what a
        hidden field will submit for True and False, for 'true' and 'false',
        which are likely to be returned by JavaScript serializations of forms,
        and for '1' and '0', which is what a RadioField will submit. Unlike
        BooleanField, this field must check for True because it doesn't use
        the bool() function.
"""
if value in (True, "True", "true", "1"):
return True
elif value in (False, "False", "false", "0"):
return False
else:
return None
def validate(self, value):
pass
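# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): the three-way mapping in practice:
#
#     from django import forms
#
#     field = forms.NullBooleanField()
#     field.clean("true")   # -> True
#     field.clean("0")      # -> False
#     field.clean("maybe")  # -> None (invalid values clean to None)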
class CallableChoiceIterator:
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
yield from self.choices_func()
class ChoiceField(Field):
widget = Select
default_error_messages = {
"invalid_choice": _(
"Select a valid choice. %(value)s is not one of the available choices."
),
}
def __init__(self, *, choices=(), **kwargs):
super().__init__(**kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super().__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"""Return a string."""
if value in self.empty_values:
return ""
return str(value)
def validate(self, value):
"""Validate that the input is in self.choices."""
super().validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": value},
)
def valid_value(self, value):
"""Check to see if the provided value is a valid choice."""
text_value = str(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == str(k2):
return True
else:
if value == k or text_value == str(k):
return True
return False
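# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): valid_value() also searches optgroup
# choices:
#
#     from django import forms
#
#     field = forms.ChoiceField(
#         choices=[("Audio", [("vinyl", "Vinyl"), ("cd", "CD")]), ("dvd", "DVD")]
#     )
#     field.clean("cd")  # -> 'cd' (found inside the "Audio" optgroup)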
class TypedChoiceField(ChoiceField):
def __init__(self, *, coerce=lambda val: val, empty_value="", **kwargs):
self.coerce = coerce
self.empty_value = empty_value
super().__init__(**kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": value},
)
return value
def clean(self, value):
value = super().clean(value)
return self._coerce(value)
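# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): coerce converts the validated choice string
# into the desired Python type:
#
#     from django import forms
#
#     field = forms.TypedChoiceField(
#         choices=[(1, "One"), (2, "Two")], coerce=int, empty_value=None
#     )
#     field.clean("2")  # -> 2 (an int, not the submitted string "2")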
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
"invalid_choice": _(
"Select a valid choice. %(value)s is not one of the available choices."
),
"invalid_list": _("Enter a list of values."),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(
self.error_messages["invalid_list"], code="invalid_list"
)
return [str(val) for val in value]
def validate(self, value):
"""Validate that the input is a list or tuple."""
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = {str(value) for value in initial}
data_set = {str(value) for value in data}
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *, coerce=lambda val: val, **kwargs):
self.coerce = coerce
self.empty_value = kwargs.pop("empty_value", [])
super().__init__(**kwargs)
def _coerce(self, value):
"""
Validate that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": choice},
)
return new_value
def clean(self, value):
value = super().clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super().validate(value)
elif self.required:
raise ValidationError(self.error_messages["required"], code="required")
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields, **kwargs):
super().__init__(**kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validate the given value against all of self.fields, which is a
list of Field instances.
"""
super().clean(value)
for field in self.fields:
value = field.clean(value)
return value
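# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): each field's clean() runs in turn over the
# same value:
#
#     from django import forms
#
#     field = forms.ComboField(
#         fields=[forms.CharField(max_length=20), forms.EmailField()]
#     )
#     field.clean("a@b.com")       # -> 'a@b.com'
#     field.clean("not-an-email")  # ValidationError from the EmailField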
class MultiValueField(Field):
"""
Aggregate the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
"invalid": _("Enter a list of values."),
"incomplete": _("Enter a complete value."),
}
def __init__(self, fields, *, require_all_fields=True, **kwargs):
self.require_all_fields = require_all_fields
super().__init__(**kwargs)
for f in fields:
f.error_messages.setdefault("incomplete", self.error_messages["incomplete"])
if self.disabled:
f.disabled = True
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super().__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validate every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if self.disabled and not isinstance(value, list):
value = self.widget.decompress(value)
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(
self.error_messages["required"], code="required"
)
else:
return self.compress([])
else:
raise ValidationError(self.error_messages["invalid"], code="invalid")
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(
self.error_messages["required"], code="required"
)
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages["incomplete"] not in errors:
errors.append(field.error_messages["incomplete"])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Return a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError("Subclasses must implement this method.")
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = ["" for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
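# Illustrative sketch (not part of the module itself): a hypothetical minimal
# subclass only needs compress(), mirroring the docstring's date/time example:
#
#     import datetime
#     from django import forms
#
#     class DateAndTimeField(forms.MultiValueField):
#         widget = forms.SplitDateTimeWidget
#
#         def __init__(self, **kwargs):
#             super().__init__(
#                 fields=(forms.DateField(), forms.TimeField()), **kwargs
#             )
#
#         def compress(self, data_list):
#             if data_list:
#                 return datetime.datetime.combine(*data_list)
#             return None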
class FilePathField(ChoiceField):
def __init__(
self,
path,
*,
match=None,
recursive=False,
allow_files=True,
allow_folders=False,
**kwargs,
):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super().__init__(choices=(), **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in sorted(files):
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in sorted(dirs):
if f == "__pycache__":
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
choices = []
with os.scandir(self.path) as entries:
for f in entries:
if f.name == "__pycache__":
continue
if (
(self.allow_files and f.is_file())
or (self.allow_folders and f.is_dir())
) and (self.match is None or self.match_re.search(f.name)):
choices.append((f.path, f.name))
choices.sort(key=operator.itemgetter(1))
self.choices.extend(choices)
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
"invalid_date": _("Enter a valid date."),
"invalid_time": _("Enter a valid time."),
}
def __init__(self, *, input_date_formats=None, input_time_formats=None, **kwargs):
errors = self.default_error_messages.copy()
if "error_messages" in kwargs:
errors.update(kwargs["error_messages"])
localize = kwargs.get("localize", False)
fields = (
DateField(
input_formats=input_date_formats,
error_messages={"invalid": errors["invalid_date"]},
localize=localize,
),
TimeField(
input_formats=input_time_formats,
error_messages={"invalid": errors["invalid_time"]},
localize=localize,
),
)
super().__init__(fields, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(
self.error_messages["invalid_date"], code="invalid_date"
)
if data_list[1] in self.empty_values:
raise ValidationError(
self.error_messages["invalid_time"], code="invalid_time"
)
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, *, protocol="both", unpack_ipv4=False, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(
protocol, unpack_ipv4
)[0]
super().__init__(**kwargs)
def to_python(self, value):
if value in self.empty_values:
return ""
value = value.strip()
if value and ":" in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
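# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): with unpack_ipv4, an IPv4-mapped IPv6
# address is unpacked by clean_ipv6_address():
#
#     from django import forms
#
#     field = forms.GenericIPAddressField(protocol="both", unpack_ipv4=True)
#     field.clean("::ffff:192.0.2.1")  # -> '192.0.2.1'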
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *, allow_unicode=False, **kwargs):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(**kwargs)
class UUIDField(CharField):
default_error_messages = {
"invalid": _("Enter a valid UUID."),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return str(value)
return value
def to_python(self, value):
value = super().to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
class InvalidJSONInput(str):
pass
class JSONString(str):
pass
class JSONField(CharField):
default_error_messages = {
"invalid": _("Enter a valid JSON."),
}
widget = Textarea
def __init__(self, encoder=None, decoder=None, **kwargs):
self.encoder = encoder
self.decoder = decoder
super().__init__(**kwargs)
def to_python(self, value):
if self.disabled:
return value
if value in self.empty_values:
return None
elif isinstance(value, (list, dict, int, float, JSONString)):
return value
try:
converted = json.loads(value, cls=self.decoder)
except json.JSONDecodeError:
raise ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
if isinstance(converted, str):
return JSONString(converted)
else:
return converted
def bound_data(self, data, initial):
if self.disabled:
return initial
if data is None:
return None
try:
return json.loads(data, cls=self.decoder)
except json.JSONDecodeError:
return InvalidJSONInput(data)
def prepare_value(self, value):
if isinstance(value, InvalidJSONInput):
return value
return json.dumps(value, ensure_ascii=False, cls=self.encoder)
def has_changed(self, initial, data):
if super().has_changed(initial, data):
return True
# For purposes of seeing whether something has changed, True isn't the
# same as 1 and the order of keys doesn't matter.
return json.dumps(initial, sort_keys=True, cls=self.encoder) != json.dumps(
self.to_python(data), sort_keys=True, cls=self.encoder
)
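# Illustrative usage sketch (not part of the module itself, assuming a
# configured Django environment): JSON strings are wrapped in JSONString so
# prepare_value() can re-serialize them, and has_changed() compares
# normalized dumps:
#
#     from django import forms
#
#     field = forms.JSONField()
#     field.clean('{"a": 1}')  # -> {'a': 1}
#     field.clean('"plain"')   # -> JSONString('plain')
#     field.has_changed({"a": 1, "b": 2}, '{"b": 2, "a": 1}')  # -> False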
|
e6ce1180f86eaed4c02748c59cac3ff87076422d21ea4a8fb5451d323595ab3e | from datetime import datetime
from django.conf import settings
from django.db.models.expressions import Func
from django.db.models.fields import (
DateField,
DateTimeField,
DurationField,
Field,
IntegerField,
TimeField,
)
from django.db.models.lookups import (
Transform,
YearExact,
YearGt,
YearGte,
YearLt,
YearLte,
)
from django.utils import timezone
class TimezoneMixin:
tzinfo = None
def get_tzname(self):
# Timezone conversions must happen to the input datetime *before*
# applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
# database as 2016-01-01 01:00:00 +00:00. Any results should be
# based on the input datetime not the stored datetime.
tzname = None
if settings.USE_TZ:
if self.tzinfo is None:
tzname = timezone.get_current_timezone_name()
else:
tzname = timezone._get_timezone_name(self.tzinfo)
return tzname
class Extract(TimezoneMixin, Transform):
lookup_name = None
output_field = IntegerField()
def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
if self.lookup_name is None:
self.lookup_name = lookup_name
if self.lookup_name is None:
raise ValueError("lookup_name must be provided")
self.tzinfo = tzinfo
super().__init__(expression, **extra)
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
lhs_output_field = self.lhs.output_field
if isinstance(lhs_output_field, DateTimeField):
tzname = self.get_tzname()
sql, params = connection.ops.datetime_extract_sql(
self.lookup_name, sql, tuple(params), tzname
)
elif self.tzinfo is not None:
raise ValueError("tzinfo can only be used with DateTimeField.")
elif isinstance(lhs_output_field, DateField):
sql, params = connection.ops.date_extract_sql(
self.lookup_name, sql, tuple(params)
)
elif isinstance(lhs_output_field, TimeField):
sql, params = connection.ops.time_extract_sql(
self.lookup_name, sql, tuple(params)
)
elif isinstance(lhs_output_field, DurationField):
if not connection.features.has_native_duration_field:
raise ValueError(
"Extract requires native DurationField database support."
)
sql, params = connection.ops.time_extract_sql(
self.lookup_name, sql, tuple(params)
)
else:
# resolve_expression has already validated the output_field so this
# assert should never be hit.
assert False, "Tried to Extract from an invalid type."
return sql, params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
copy = super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
field = getattr(copy.lhs, "output_field", None)
if field is None:
return copy
if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
raise ValueError(
"Extract input expression must be DateField, DateTimeField, "
"TimeField, or DurationField."
)
# Passing dates to functions expecting datetimes is most likely a mistake.
if type(field) == DateField and copy.lookup_name in (
"hour",
"minute",
"second",
):
raise ValueError(
"Cannot extract time component '%s' from DateField '%s'."
% (copy.lookup_name, field.name)
)
if isinstance(field, DurationField) and copy.lookup_name in (
"year",
"iso_year",
"month",
"week",
"week_day",
"iso_week_day",
"quarter",
):
raise ValueError(
"Cannot extract component '%s' from DurationField '%s'."
% (copy.lookup_name, field.name)
)
return copy
class ExtractYear(Extract):
lookup_name = "year"
class ExtractIsoYear(Extract):
"""Return the ISO-8601 week-numbering year."""
lookup_name = "iso_year"
class ExtractMonth(Extract):
lookup_name = "month"
class ExtractDay(Extract):
lookup_name = "day"
class ExtractWeek(Extract):
"""
    Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first day of
    the week.
"""
lookup_name = "week"
class ExtractWeekDay(Extract):
"""
Return Sunday=1 through Saturday=7.
To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
"""
lookup_name = "week_day"
class ExtractIsoWeekDay(Extract):
"""Return Monday=1 through Sunday=7, based on ISO-8601."""
lookup_name = "iso_week_day"
class ExtractQuarter(Extract):
lookup_name = "quarter"
class ExtractHour(Extract):
lookup_name = "hour"
class ExtractMinute(Extract):
lookup_name = "minute"
class ExtractSecond(Extract):
lookup_name = "second"
DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractIsoWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractIsoYear)
DateField.register_lookup(ExtractQuarter)
TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)
DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)
ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)
ExtractIsoYear.register_lookup(YearExact)
ExtractIsoYear.register_lookup(YearGt)
ExtractIsoYear.register_lookup(YearGte)
ExtractIsoYear.register_lookup(YearLt)
ExtractIsoYear.register_lookup(YearLte)
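# Illustrative usage sketch (not part of the module itself): with the lookups
# registered above, both the transform and the annotation forms work, e.g.
# against a hypothetical Entry model with a pub_date field:
#
#     from django.db.models.functions import ExtractYear
#
#     Entry.objects.filter(pub_date__iso_week_day=1)         # Mondays
#     Entry.objects.annotate(year=ExtractYear("pub_date"))
#     Entry.objects.filter(pub_date__year__gte=2020)         # via YearGte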
class Now(Func):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
def as_postgresql(self, compiler, connection, **extra_context):
# PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the
# transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with
# other databases.
return self.as_sql(
compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="STRFTIME('%%%%Y-%%%%m-%%%%d %%%%H:%%%%M:%%%%f', 'NOW')",
**extra_context,
)
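# Illustrative usage sketch (not part of the module itself): Now() resolves
# to the database's current timestamp, so the comparison happens in SQL, e.g.
# against a hypothetical Entry model:
#
#     from django.db.models.functions import Now
#
#     Entry.objects.filter(pub_date__lte=Now())
#     Entry.objects.update(modified=Now())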
class TruncBase(TimezoneMixin, Transform):
kind = None
tzinfo = None
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def __init__(
self,
expression,
output_field=None,
tzinfo=None,
is_dst=timezone.NOT_PASSED,
**extra,
):
self.tzinfo = tzinfo
self.is_dst = is_dst
super().__init__(expression, output_field=output_field, **extra)
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
tzname = None
if isinstance(self.lhs.output_field, DateTimeField):
tzname = self.get_tzname()
elif self.tzinfo is not None:
raise ValueError("tzinfo can only be used with DateTimeField.")
if isinstance(self.output_field, DateTimeField):
sql, params = connection.ops.datetime_trunc_sql(
self.kind, sql, tuple(params), tzname
)
elif isinstance(self.output_field, DateField):
sql, params = connection.ops.date_trunc_sql(
self.kind, sql, tuple(params), tzname
)
elif isinstance(self.output_field, TimeField):
sql, params = connection.ops.time_trunc_sql(
self.kind, sql, tuple(params), tzname
)
else:
raise ValueError(
"Trunc only valid on DateField, TimeField, or DateTimeField."
)
return sql, params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
copy = super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
field = copy.lhs.output_field
# DateTimeField is a subclass of DateField so this works for both.
if not isinstance(field, (DateField, TimeField)):
raise TypeError(
"%r isn't a DateField, TimeField, or DateTimeField." % field.name
)
# If self.output_field was None, then accessing the field will trigger
# the resolver to assign it to self.lhs.output_field.
if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
raise ValueError(
"output_field must be either DateField, TimeField, or DateTimeField"
)
# Passing dates or times to functions expecting datetimes is most
# likely a mistake.
class_output_field = (
self.__class__.output_field
if isinstance(self.__class__.output_field, Field)
else None
)
output_field = class_output_field or copy.output_field
has_explicit_output_field = (
class_output_field or field.__class__ is not copy.output_field.__class__
)
if type(field) == DateField and (
isinstance(output_field, DateTimeField)
or copy.kind in ("hour", "minute", "second", "time")
):
raise ValueError(
"Cannot truncate DateField '%s' to %s."
% (
field.name,
output_field.__class__.__name__
if has_explicit_output_field
else "DateTimeField",
)
)
elif isinstance(field, TimeField) and (
isinstance(output_field, DateTimeField)
or copy.kind in ("year", "quarter", "month", "week", "day", "date")
):
raise ValueError(
"Cannot truncate TimeField '%s' to %s."
% (
field.name,
output_field.__class__.__name__
if has_explicit_output_field
else "DateTimeField",
)
)
return copy
def convert_value(self, value, expression, connection):
if isinstance(self.output_field, DateTimeField):
if not settings.USE_TZ:
pass
elif value is not None:
value = value.replace(tzinfo=None)
value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst)
elif not connection.features.has_zoneinfo_database:
raise ValueError(
"Database returned an invalid datetime value. Are time "
"zone definitions for your database installed?"
)
elif isinstance(value, datetime):
if value is None:
pass
elif isinstance(self.output_field, DateField):
value = value.date()
elif isinstance(self.output_field, TimeField):
value = value.time()
return value
class Trunc(TruncBase):
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def __init__(
self,
expression,
kind,
output_field=None,
tzinfo=None,
is_dst=timezone.NOT_PASSED,
**extra,
):
self.kind = kind
super().__init__(
expression, output_field=output_field, tzinfo=tzinfo, is_dst=is_dst, **extra
)
class TruncYear(TruncBase):
kind = "year"
class TruncQuarter(TruncBase):
kind = "quarter"
class TruncMonth(TruncBase):
kind = "month"
class TruncWeek(TruncBase):
"""Truncate to midnight on the Monday of the week."""
kind = "week"
class TruncDay(TruncBase):
kind = "day"
class TruncDate(TruncBase):
kind = "date"
lookup_name = "date"
output_field = DateField()
def as_sql(self, compiler, connection):
# Cast to date rather than truncate to date.
sql, params = compiler.compile(self.lhs)
tzname = self.get_tzname()
return connection.ops.datetime_cast_date_sql(sql, tuple(params), tzname)
class TruncTime(TruncBase):
kind = "time"
lookup_name = "time"
output_field = TimeField()
def as_sql(self, compiler, connection):
# Cast to time rather than truncate to time.
sql, params = compiler.compile(self.lhs)
tzname = self.get_tzname()
return connection.ops.datetime_cast_time_sql(sql, tuple(params), tzname)
class TruncHour(TruncBase):
kind = "hour"
class TruncMinute(TruncBase):
kind = "minute"
class TruncSecond(TruncBase):
kind = "second"
DateTimeField.register_lookup(TruncDate)
DateTimeField.register_lookup(TruncTime)
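# Illustrative usage sketch (not part of the module itself): Trunc rolls a
# datetime back to a period boundary, which is mostly useful for grouping,
# e.g. against a hypothetical Entry model:
#
#     import datetime
#     from django.db.models import Count
#     from django.db.models.functions import TruncMonth
#
#     Entry.objects.annotate(month=TruncMonth("pub_date")).values(
#         "month"
#     ).annotate(n=Count("id"))
#     Entry.objects.filter(pub_date__date=datetime.date(2023, 4, 1))  # TruncDate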
|
2ae42058fcb36ec22cbfaaba8ba44cd71dea0bd226d072c80062730748bc9afb | import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
allows_group_by_refs = self.connection.features.allows_group_by_refs
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expr = self.query.resolve_ref(expr)
if not allows_group_by_refs and isinstance(expr, Ref):
expr = expr.source
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
aliased_exprs = {}
for expr, _, alias in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
if alias:
aliased_exprs[expr] = alias
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
if allows_group_by_refs and (alias := aliased_exprs.get(expr)):
expr = Ref(alias, expr)
try:
sql, params = self.compile(expr)
except EmptyResultSet:
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
if isinstance(field.expression, F) and (
annotation := self.query.annotation_select.get(
field.expression.name
)
):
field.expression = Ref(field.expression.name, annotation)
yield field, isinstance(field.expression, Ref)
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
                    # combined queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
                    # combined queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if not is_ref and self.query.combinator and self.select:
src = resolved.expression
expr_src = expr.expression
for sel_expr, _, col_alias in self.select:
if col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions(
[Ref(col_alias if col_alias else src.target.column, src)]
)
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
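    # Illustrative sketch (not part of the module itself): compile() is what
    # makes per-backend `as_<vendor>` hooks work; a hypothetical expression
    # could override its SQL for one backend only:
    #
    #     from django.db.models import Func
    #
    #     class MyFunc(Func):
    #         function = "MY_FUNC"
    #
    #         def as_sqlite(self, compiler, connection, **extra_context):
    #             # Picked up by compile() when connection.vendor == "sqlite".
    #             return self.as_sql(
    #                 compiler, connection, function="my_func", **extra_context
    #             )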
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for compiler in compilers:
if compiler.query.is_sliced:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
select_aliases = set(select.values())
qual_aliases = set()
replacements = {}
def collect_replacements(expressions):
while expressions:
expr = expressions.pop()
if expr in replacements:
continue
elif select_alias := select.get(expr):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
elif isinstance(expr, Ref):
if expr.refs not in select_aliases:
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
collect_replacements(list(self.qualify.leaves()))
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
order_by = []
for order_by_expr, *_ in self.get_order_by():
collect_replacements(order_by_expr.get_source_expressions())
order_by.append(
order_by_expr.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
params = list(inner_params) + qualify_params
        # As the SQL spec is unclear on whether or not the ordering of derived
        # tables must propagate, it has to be explicitly repeated on the
        # outermost query to ensure it's preserved.
if order_by:
ordering_sqls = []
for ordering in order_by:
ordering_sql, ordering_params = self.compile(ordering)
ordering_sqls.append(ordering_sql)
params.extend(ordering_params)
result.extend(["ORDER BY", ", ".join(ordering_sqls)])
return result, params
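    # A minimal sketch of the emulation: QUALIFY isn't standard SQL, so the
    # window-function filter is applied by pushing the original query into a
    # derived table and filtering on its aliased columns, roughly
    #   SELECT * FROM (<inner query with column aliases>) "qualify"
    #   WHERE <predicate over the window aliases> [ORDER BY ...]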
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases,
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
order_by_sql = "ORDER BY %s" % ", ".join(ordering)
if combinator and features.requires_compound_order_by_subquery:
result = ["SELECT * FROM (", *result, ")", order_by_sql]
else:
result.append(order_by_sql)
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
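    # A minimal sketch of how the clauses above assemble, for a hypothetical
    # filtered and sliced queryset:
    #   SELECT "book"."id", "book"."title" FROM "book"
    #   WHERE "book"."title" = %s ORDER BY "book"."id" ASC LIMIT 10
    # with params ("x",); GROUP BY/HAVING, FOR UPDATE, and the subquery
    # wrapping are appended only when the query requires them.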
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions, one per selected concrete field,
        ready to be compiled and quoted for use in the SELECT clause.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
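    # Illustrative example: for a hypothetical model with concrete fields
    # (id, title), this returns one Col expression per field, compiling to
    # "book"."id", "book"."title"; select_mask (from only()/defer()) filters
    # fields out, and from_parent suppresses columns already selected on a
    # parent model.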
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
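    # Illustrative example (PostgreSQL-style DISTINCT ON): assuming a
    # hypothetical qs.distinct("author__name"), the lookup path is resolved
    # through its joins and the compiled query gains a prefix along the
    # lines of
    #   SELECT DISTINCT ON ("author"."name") ...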
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified or
# there are transforms to process.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
and not getattr(transform_function, "has_transforms", False)
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
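    # Illustrative example: ordering by a relation expands to the related
    # model's Meta.ordering. Assuming a hypothetical Book model whose "author"
    # FK targets an Author with ordering = ["name"],
    # Book.objects.order_by("author") effectively sorts by "author__name",
    # i.e. ORDER BY "author"."name" ASC once the join is set up.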
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target columns
        for the same input, as the prefixes of get_order_by() and
        get_distinct() must match. Executing SQL where this is not true is an
        error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
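    # A sketch of the klass_info structure built above (abridged), assuming a
    # hypothetical Book.objects.select_related("author"):
    #   {"model": Author, "field": <FK: author>, "reverse": False,
    #    "select_fields": [3, 4], "related_klass_infos": [...], ...}
    # where "select_fields" holds indexes into the compiler's select list.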
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
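    # Illustrative example: for a hypothetical
    # qs.select_related("author").select_for_update(of=("self", "author")),
    # "self" resolves to the root model's first selected column and "author"
    # to the joined relation, yielding e.g. FOR UPDATE OF "book", "author"
    # (column names instead of table aliases on backends where
    # features.select_for_update_of_column is True).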
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
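    # Illustrative example: converters post-process raw database values per
    # column position. On backends without a native boolean type, for
    # instance, a BooleanField converter maps the fetched 0/1 to False/True,
    # so a raw row [1, "x"] is yielded as [True, "x"].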
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
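    # Illustrative usage of the result types, as seen elsewhere in this
    # module:
    #   compiler.execute_sql(SINGLE)   # one row, e.g. has_results()
    #   compiler.execute_sql(MULTI)    # chunked row iterator, results_iter()
    #   compiler.execute_sql(CURSOR)   # raw cursor, e.g. update subclasses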
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
        # Some backends return one-item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
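    # A sketch of the three placeholder cases, with hypothetical arguments:
    #   field_as_sql(None, "NOW()")  -> ("NOW()", [])   # raw value
    #   field_as_sql(f, expression)  -> compiled SQL and params
    #   field_as_sql(f, 42)          -> ("%s", [42])    # common case
    # Fields defining get_placeholder() (e.g. geometry fields) supply their
    # own placeholder string instead of "%s".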
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
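    # A shape sketch with hypothetical values: for
    #   fields = [f1, f2]; value_rows = [[1, 2], [3, 4]]
    # this returns
    #   placeholder_rows = (("%s", "%s"), ("%s", "%s"))
    #   param_rows = [[1, 2], [3, 4]]
    # i.e. one placeholder row and one flattened parameter row per object.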
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
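    # A minimal sketch of the output, for a hypothetical two-object bulk
    # insert on a backend with has_bulk_insert:
    #   [('INSERT INTO "book" ("title", "pages") VALUES (%s, %s), (%s, %s)',
    #     ("A", 1, "B", 2))]
    # Without bulk support, one (sql, params) pair is returned per object.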
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
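    # A minimal sketch: a single-table delete compiles directly to
    #   DELETE FROM "book" WHERE ...
    # while multi-table or self-referencing deletes fall back to
    #   DELETE FROM "book" WHERE "id" IN (<pk subquery>)
    # with the subquery materialized through an extra wrapper on backends
    # where features.update_can_self_select is False (e.g. MySQL).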
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
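# Illustrative usage: cursor_iter() drives chunked reads, fetching up to
# `itersize` rows per iteration until fetchmany() returns the backend's
# empty_fetchmany_value sentinel, e.g.
#   for chunk in cursor_iter(cursor, connection.features.empty_fetchmany_value,
#                            None, 100):
#       for row in chunk:
#           ...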
import operator
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
allows_group_by_pk = True
related_fields_match_type = True
# MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
allow_sliced_subqueries_with_in = False
has_select_for_update = True
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
can_release_savepoints = True
atomic_transactions = False
can_clone_databases = True
supports_temporal_subtraction = True
supports_slicing_ordering_in_compound = True
supports_index_on_text_field = False
supports_update_conflicts = True
create_test_procedure_without_params_sql = """
CREATE PROCEDURE test_procedure ()
BEGIN
DECLARE V_I INTEGER;
SET V_I = 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE test_procedure (P_I INTEGER)
BEGIN
DECLARE V_I INTEGER;
SET V_I = P_I;
END;
"""
create_test_table_with_composite_primary_key = """
CREATE TABLE test_table_composite_pk (
column_1 INTEGER NOT NULL,
column_2 INTEGER NOT NULL,
PRIMARY KEY(column_1, column_2)
)
"""
    # Neither MySQL nor MariaDB supports partial indexes.
supports_partial_indexes = False
# COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an
# indexed expression.
collate_as_index_expression = True
supports_order_by_nulls_modifier = False
order_by_nulls_first = True
supports_logical_xor = True
@cached_property
def minimum_database_version(self):
if self.connection.mysql_is_mariadb:
return (10, 4)
else:
return (8,)
@cached_property
def test_collations(self):
charset = "utf8"
if (
self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (10, 6)
) or (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 30)
):
# utf8 is an alias for utf8mb3 in MariaDB 10.6+ and MySQL 8.0.30+.
charset = "utf8mb3"
return {
"ci": f"{charset}_general_ci",
"non_default": f"{charset}_esperanto_ci",
"swedish_ci": f"{charset}_swedish_ci",
}
test_now_utc_template = "UTC_TIMESTAMP(6)"
@cached_property
def django_test_skips(self):
skips = {
"This doesn't work on MySQL.": {
"db_functions.comparison.test_greatest.GreatestTests."
"test_coalesce_workaround",
"db_functions.comparison.test_least.LeastTests."
"test_coalesce_workaround",
},
"Running on MySQL requires utf8mb4 encoding (#18392).": {
"model_fields.test_textfield.TextFieldTests.test_emoji",
"model_fields.test_charfield.TestCharField.test_emoji",
},
"MySQL doesn't support functional indexes on a function that "
"returns JSON": {
"schema.tests.SchemaTests.test_func_index_json_key_transform",
},
"MySQL supports multiplying and dividing DurationFields by a "
"scalar value but it's not implemented (#25287).": {
"expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide",
},
"UPDATE ... ORDER BY syntax on MySQL/MariaDB does not support ordering by"
"related fields.": {
"update.tests.AdvancedTests."
"test_update_ordered_by_inline_m2m_annotation",
"update.tests.AdvancedTests.test_update_ordered_by_m2m_annotation",
},
}
if "ONLY_FULL_GROUP_BY" in self.connection.sql_mode:
skips.update(
{
"GROUP BY optimization does not work properly when "
"ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #31331.": {
"aggregation.tests.AggregateTestCase."
"test_aggregation_subquery_annotation_multivalued",
"annotations.tests.NonAggregateAnnotationTestCase."
"test_annotation_aggregate_with_m2o",
},
}
)
if self.connection.mysql_is_mariadb and (
10,
4,
3,
) < self.connection.mysql_version < (10, 5, 2):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-19598": {
"schema.tests.SchemaTests."
"test_alter_not_unique_field_to_primary_key",
},
}
)
if self.connection.mysql_is_mariadb and (
10,
4,
12,
) < self.connection.mysql_version < (10, 5):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-22775": {
"schema.tests.SchemaTests."
"test_alter_pk_with_self_referential_field",
},
}
)
if not self.supports_explain_analyze:
skips.update(
{
"MariaDB and MySQL >= 8.0.18 specific.": {
"queries.test_explain.ExplainTests.test_mysql_analyze",
},
}
)
return skips
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
return self.connection.mysql_server_data["default_storage_engine"]
@cached_property
def allows_auto_pk_0(self):
"""
Autoincrement primary key can be set to 0 if it doesn't generate new
autoincrement values.
"""
return "NO_AUTO_VALUE_ON_ZERO" in self.connection.sql_mode
@cached_property
def update_can_self_select(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
3,
2,
)
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != "MyISAM"
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"BinaryField": "TextField",
"BooleanField": "IntegerField",
"DurationField": "BigIntegerField",
"GenericIPAddressField": "CharField",
}
@cached_property
def can_return_columns_from_insert(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
5,
0,
)
can_return_rows_from_bulk_insert = property(
operator.attrgetter("can_return_columns_from_insert")
)
@cached_property
def has_zoneinfo_database(self):
return self.connection.mysql_server_data["has_zoneinfo_database"]
@cached_property
def is_sql_auto_is_null_enabled(self):
return self.connection.mysql_server_data["sql_auto_is_null"]
@cached_property
def supports_over_clause(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 2)
supports_frame_range_fixed_distance = property(
operator.attrgetter("supports_over_clause")
)
@cached_property
def supports_column_check_constraints(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 16)
supports_table_check_constraints = property(
operator.attrgetter("supports_column_check_constraints")
)
@cached_property
def can_introspect_check_constraints(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 16)
@cached_property
def has_select_for_update_skip_locked(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 6)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_nowait(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_of(self):
return (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 1)
)
@cached_property
def supports_explain_analyze(self):
return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (
8,
0,
18,
)
@cached_property
def supported_explain_formats(self):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other
# backends.
formats = {"JSON", "TEXT", "TRADITIONAL"}
if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
8,
0,
16,
):
formats.add("TREE")
return formats
@cached_property
def supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
return self._mysql_storage_engine != "MyISAM"
uses_savepoints = property(operator.attrgetter("supports_transactions"))
can_release_savepoints = property(operator.attrgetter("supports_transactions"))
@cached_property
def ignores_table_name_case(self):
return self.connection.mysql_server_data["lower_case_table_names"]
@cached_property
def supports_default_in_lead_lag(self):
# To be added in https://jira.mariadb.org/browse/MDEV-12981.
return not self.connection.mysql_is_mariadb
@cached_property
def can_introspect_json_field(self):
if self.connection.mysql_is_mariadb:
return self.can_introspect_check_constraints
return True
@cached_property
def supports_index_column_ordering(self):
if self._mysql_storage_engine != "InnoDB":
return False
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 8)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def supports_expression_indexes(self):
return (
not self.connection.mysql_is_mariadb
and self._mysql_storage_engine != "MyISAM"
and self.connection.mysql_version >= (8, 0, 13)
)
@cached_property
def supports_select_intersection(self):
is_mariadb = self.connection.mysql_is_mariadb
return is_mariadb or self.connection.mysql_version >= (8, 0, 31)
supports_select_difference = property(
operator.attrgetter("supports_select_intersection")
)
@cached_property
def can_rename_index(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 5, 2)
return True
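# Illustrative example of how these flags gate SQL generation: the compiler's
# as_sql() checks getattr(features, "supports_select_<combinator>") before
# emitting a compound query, so qs1.intersection(qs2) raises NotSupportedError
# on MySQL < 8.0.31 while MariaDB always allows it.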